1.0
wanggang 4 years ago
parent cf4486807a
commit c6fbd969d2

@@ -1,23 +0,0 @@
# ...
# Options: tcp (default), kafka, RocketMQ
canal.serverMode = kafka
# ...
# kafka/rocketmq cluster configuration: 192.168.1.117:9092,192.168.1.118:9092,192.168.1.119:9092
canal.mq.servers = kafka:9092
canal.mq.retries = 0
# In flatMessage mode this value can be increased, but do not exceed the MQ message body size limit
canal.mq.batchSize = 16384
canal.mq.maxRequestSize = 1048576
# In flatMessage mode, increase this value (50-200 recommended)
canal.mq.lingerMs = 1
canal.mq.bufferMemory = 33554432
# Canal batch size, default 50K; because of Kafka's maximum message size limit, do not exceed 1M (keep it under 900K)
canal.mq.canalBatchSize = 50
# Timeout for Canal to fetch (get) data, in milliseconds; leave empty for no timeout
canal.mq.canalGetTimeout = 100
# Whether to use the flat JSON message format
canal.mq.flatMessage = false
canal.mq.compressionType = none
canal.mq.acks = all
# Whether Kafka message delivery uses transactions
canal.mq.transaction = false
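The removed block above only configures the MQ transport; which Kafka topic an instance publishes to is set per instance. As a rough sketch, assuming canal 1.1.x instance.properties key names and the mysql.example.User topic that kafkacat consumes further down, the matching routing could look like:

# Sketch only (assumed instance.properties keys; verify against the deployed canal version)
canal.mq.topic=mysql.example.User
# or route each schema.table to its own topic via a pattern:
# canal.mq.dynamicTopic=.*\\..*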

@@ -17,6 +17,8 @@ services:
restart: always
ports:
- 2181:2181
volumes:
- ./data/zookeeper:/data
#/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic mysql-server.example.User
kafka:
image: wurstmeister/kafka:2.13-2.7.0
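The consumer command quoted in the comment above can also be run through compose once the stack is up; a sketch using the service names from this file:

docker-compose exec kafka /opt/kafka/bin/kafka-console-consumer.sh \
  --bootstrap-server localhost:9092 \
  --topic mysql-server.example.User --from-beginning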
@@ -32,6 +34,10 @@ services:
- 9092:9092
depends_on:
- zookeeper
kafkacat:
image: edenhill/kafkacat:1.6.0
restart: always
command: ["kafkacat", "-C", "-b", "kafka:9092", "-t", "mysql.example.User"]
kafka-manager:
image: kafkamanager/kafka-manager:3.0.0.4
restart: always
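The kafkacat service added here just tails the topic, so its output can be followed with the usual compose log command:

docker-compose logs -f kafkacat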
@@ -66,24 +72,6 @@ services:
- ui.base.uri=http://localhost:8080/api
ports:
- 8080:8080
canal:
image: canal/canal-server:v1.1.5
privileged: true
restart: always
ports:
- 11111:11111
environment:
- canal.auto.scan=false
- canal.instance.master.address=$IP:3306
- canal.instance.dbUsername=slave
- canal.instance.dbPassword=aA123456!
- canal.instance.filter.regex=.*\\..*
- canal.instance.filter.black.regex=mysql\\.slave_.*
- canal.instance.connectionCharset=UTF-8
- canal.instance.tsdb.enable=true
depends_on:
- mysql
command: bash -c "run.sh"
doris-be:
image: primetoninc/jdk:1.8
ports:

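The canal service removed in this hunk can still be started outside the compose file if needed; a rough docker run equivalent using the same image and environment (an assumption, not part of this commit; $IP must resolve to the MySQL host):

docker run -d --name canal-server -p 11111:11111 \
  -e canal.auto.scan=false \
  -e "canal.instance.master.address=$IP:3306" \
  -e canal.instance.dbUsername=slave \
  -e 'canal.instance.dbPassword=aA123456!' \
  -e 'canal.instance.filter.regex=.*\\..*' \
  -e 'canal.instance.filter.black.regex=mysql\\.slave_.*' \
  -e canal.instance.connectionCharset=UTF-8 \
  -e canal.instance.tsdb.enable=true \
  canal/canal-server:v1.1.5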