1.0
wanggang 4 years ago
parent b221889721
commit a9db2f01b0

@ -0,0 +1 @@
export IP=192.168.100.144

@ -8,6 +8,13 @@
<ItemGroup>
<PackageReference Include="Confluent.Kafka" Version="1.6.3" />
<PackageReference Include="Microsoft.Extensions.Hosting" Version="5.0.0" />
<PackageReference Include="Microsoft.Extensions.Http" Version="5.0.0" />
</ItemGroup>
<ItemGroup>
<None Update="appsettings.json">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project>

@ -2,26 +2,39 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using System;
using System.IO;
using System.Reflection;
using System.Runtime.InteropServices;
namespace Kafka2Doris
{
internal class Program
public class Program
{
private static void Main(string[] args)
{
var isWindows = RuntimeInformation.IsOSPlatform(OSPlatform.Windows);
var rid = isWindows ? "win-x64" : "linux-x64";
var file = isWindows ? "librdkafka.dll" : "librdkafka.so";
var path = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "runtimes", rid, "native", file);
Confluent.Kafka.Library.Load(path);
var config = new ConfigurationBuilder()
.SetBasePath(Directory.GetCurrentDirectory())
.AddJsonFile("appsettings.json")
.AddEnvironmentVariables()
.AddCommandLine(args)
.Build();
Host.CreateDefaultBuilder(args)
.ConfigureLogging(o =>
{
o.AddConsole();
})
.ConfigureAppConfiguration((hostingContext, configuration) =>
{
configuration
.AddJsonFile("appsettings.json", optional: true, reloadOnChange: true)
.AddJsonFile($"appsettings.{hostingContext.HostingEnvironment.EnvironmentName}.json", true, true);
})
.ConfigureServices((hostingContext, services) =>
{
services.AddLogging();
services.AddSingleton(config);
services.AddHttpClient();
services.AddHostedService<Worker>();
})
.Build()

@ -1,7 +1,10 @@
using Microsoft.Extensions.Configuration;
using Confluent.Kafka;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Generic;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;
@ -11,11 +14,13 @@ namespace Kafka2Doris
{
private readonly ILogger<Worker> _logger;
private readonly IConfiguration _config;
private readonly IHttpClientFactory _httpClientFactory;
public Worker(ILogger<Worker> logger, IConfiguration config)
public Worker(ILogger<Worker> logger, IConfiguration config, IHttpClientFactory httpClientFactory)
{
this._logger = logger;
this._config = config;
this._httpClientFactory = httpClientFactory;
}
protected override async Task ExecuteAsync(CancellationToken stoppingToken)
@ -23,7 +28,73 @@ namespace Kafka2Doris
while (!stoppingToken.IsCancellationRequested)
{
_logger.LogInformation("Worker running at: {time}", DateTimeOffset.Now);
// read a batch of messages from Kafka
//
var topics = _config["topics"].Split(',');
foreach (var topic in topics)
{
var conf = new ConsumerConfig
{
BootstrapServers = _config.GetValue("server", "localhost:9092"),
GroupId = $"kafka2doris",
AutoOffsetReset = AutoOffsetReset.Earliest
};
var timeout = TimeSpan.FromSeconds(_config.GetValue("timeout",30));
try
{
using (var consumer = new ConsumerBuilder<Ignore, string>(conf).Build())
{
try
{
consumer.Subscribe(topic);
CancellationTokenSource cts = new CancellationTokenSource();
Console.CancelKeyPress += (_, e) => {
e.Cancel = true; // prevent the process from terminating.
cts.Cancel();
};
try
{
var max = _config.GetValue("max", 1000);
var list = new List<string>(max);
while (max > 0)
{
try
{
var consumeResult = consumer.Consume(cts.Token);
list.Add(consumeResult.Message.Value);
}
catch (ConsumeException e)
{
Console.WriteLine($"Error occured: {e.Error.Reason}");
break;
}
max -= 1;
}
if (list.Count > 0)
{
var httpClient = this._httpClientFactory.CreateClient();
//httpClient.PutAsync
//consumer.Commit();
}
}
catch (OperationCanceledException)
{
// Ensure the consumer leaves the group cleanly and final offsets are committed.
consumer.Close();
}
}
catch (Exception ex) // DbUpdateException // ProduceException<Null,string>
{
Console.WriteLine(ex.Message);
}
}
}
catch (Exception ex)
{
this._logger.LogError(ex.ToString());
}
}
//
await Task.Delay(this._config.GetValue("delay", 1000 * 60), stoppingToken);
}
}
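
The commented-out httpClient.PutAsync and consumer.Commit() calls above hint at pushing each batch to Doris via its Stream Load API, which ingests data through an HTTP PUT against the FE. A minimal sketch of that call, assuming the consumed message values have been joined into a single JSON array string; the host, database, table, and credentials below are placeholders, not part of this commit:

using System;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Threading.Tasks;

public static class DorisStreamLoadSketch
{
    public static async Task LoadAsync(HttpClient httpClient, string jsonRows)
    {
        // Stream Load endpoint: PUT http://{fe_host}:8030/api/{db}/{table}/_stream_load
        // "doris-fe", "example_db" and "user" are hypothetical names.
        var url = "http://doris-fe:8030/api/example_db/user/_stream_load";

        var request = new HttpRequestMessage(HttpMethod.Put, url)
        {
            Content = new StringContent(jsonRows, Encoding.UTF8)
        };

        // Basic auth with a Doris account; "root" with an empty password is a placeholder.
        var token = Convert.ToBase64String(Encoding.UTF8.GetBytes("root:"));
        request.Headers.Authorization = new AuthenticationHeaderValue("Basic", token);

        // Declare the payload as a JSON array and give the load a unique label
        // so a retried batch is not ingested twice.
        request.Headers.Add("format", "json");
        request.Headers.Add("strip_outer_array", "true");
        request.Headers.Add("label", $"kafka2doris-{Guid.NewGuid():N}");
        request.Headers.ExpectContinue = true;

        // Note: the FE may answer with a 307 redirect to a BE node; handling that
        // (and re-attaching the Authorization header) is omitted in this sketch.
        var response = await httpClient.SendAsync(request);
        Console.WriteLine(await response.Content.ReadAsStringAsync());

        // Only after a successful load would the Kafka offsets be committed,
        // i.e. the consumer.Commit() call that is still commented out above.
    }
}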

@ -1,4 +1,10 @@
{
"delay": 60,
"group": 1000
}
"Logging": {
"LogLevel": {
"Default": "Warning",
"Microsoft": "Warning"
}
},
"kafka": "localhost:9092",
"topics": "mysql.example.User"
}

@ -31,7 +31,7 @@ services:
environment:
KAFKA_HEAP_OPTS: "-Xmx512m -Xms512m"
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_HOST_NAME: 172.172.0.21
KAFKA_ADVERTISED_HOST_NAME: ${ip}
num.partitions: 1
volumes:
- ./data/kafka:/kafka
@ -116,7 +116,6 @@ services:
environment:
- priority_networks=172.172.0.0/24
volumes:
- ./apps/doris:/doris
- ./conf/doris/fe.conf:/doris/fe/conf/fe.conf
- ./log/doris/fe:/doris/fe/log
- ./data/doris/fe/doris-meta:/doris/fe/doris-meta
@ -134,7 +133,6 @@ services:
environment:
- priority_networks=172.172.0.0/24
volumes:
- ./apps/doris:/doris
- ./conf/doris/be.conf:/doris/be/conf/be.conf
- ./data/doris/be/storage:/doris/be/storage
- ./log/doris/be/:/doris/be/log
