1.0
wanggang 4 years ago
parent d9725054cc
commit d8932dd800

@@ -1 +1 @@
-curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" http://localhost:8083/connectors/ -d @config.json
+curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" http://debezium:8083/connectors/ -d @config.json
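For context, the config.json posted here is the Kafka Connect registration payload for the Debezium MySQL connector. A minimal sketch of what it might contain, assuming a Debezium 1.5-era MySQL connector whose logical server name matches the mysql.example.User topic used later in this commit; the connector name, hostnames, and credentials are placeholders, not values from this repo:

```bash
# Hypothetical config.json for the MySQL connector; every value below is a placeholder.
cat > config.json <<'EOF'
{
  "name": "example-connector",
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "database.hostname": "mysql",
    "database.port": "3306",
    "database.user": "root",
    "database.password": "secret",
    "database.server.id": "1",
    "database.server.name": "mysql",
    "database.include.list": "example",
    "database.history.kafka.bootstrap.servers": "kafka:9092",
    "database.history.kafka.topic": "schema-changes.example"
  }
}
EOF
# Register it, then poll the standard Kafka Connect status endpoint.
curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" \
  http://debezium:8083/connectors/ -d @config.json
curl -s http://debezium:8083/connectors/example-connector/status
```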

@@ -1,19 +0,0 @@
---
-CREATE ROUTINE LOAD db1.job1 on tbl1
-PROPERTIES
-(
-"desired_concurrent_number"="1"
-)
-FROM KAFKA
-(
-"kafka_broker_list"= "broker1:9091,broker2:9091",
-"kafka_topic" = "my_topic",
-"property.security.protocol" = "ssl",
-"property.ssl.ca.location" = "FILE:ca.pem",
-"property.ssl.certificate.location" = "FILE:client.pem",
-"property.ssl.key.location" = "FILE:client.key",
-"property.ssl.key.password" = "abcdefg"
-);
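The deleted snippet above is the SSL variant of a Doris routine load. Once such a job exists it can be inspected and paused with standard Doris statements; a short sketch, run through the mysql client as elsewhere in this repo (job name taken from the snippet):

```bash
# Inspect state, consumed offsets, and error rows of the routine load job.
mysql -h doris-fe -P 9030 -u root -e "SHOW ROUTINE LOAD FOR db1.job1;"
# Pause and resume it, e.g. while replacing the certificate files it references.
mysql -h doris-fe -P 9030 -u root -e "PAUSE ROUTINE LOAD FOR db1.job1;"
mysql -h doris-fe -P 9030 -u root -e "RESUME ROUTINE LOAD FOR db1.job1;"
```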

@@ -19,7 +19,6 @@ services:
       - 2181:2181
     volumes:
       - ./data/zookeeper:/data
-    #/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic mysql-server.example.User
   kafka:
     image: wurstmeister/kafka:2.13-2.7.0
     restart: always
@@ -65,6 +64,8 @@ services:
       - STATUS_STORAGE_TOPIC=my_connect_statuses
     ports:
      - 8083:8083
+    volumes:
+      - ./conf/debezium:/start
  debezium-ui:
    image: debezium/debezium-ui:1.5
    restart: always
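The new volume mounts ./conf/debezium at /start inside the connect container; the README below then runs ./start.sh from that directory. The script itself is not part of this diff, but a plausible sketch, assuming it just waits for the Connect REST port and registers config.json, would be:

```bash
#!/usr/bin/env bash
# Hypothetical /start/start.sh: wait for Kafka Connect, then register the connector.
until curl -s http://localhost:8083/connectors >/dev/null; do
  sleep 2   # REST API not up yet
done
curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" \
  http://localhost:8083/connectors/ -d @config.json
```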
@@ -91,7 +92,7 @@ services:
       CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1"
       CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1"
       CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1"
-     volumes:
+    volumes:
       - ./data/kafka-connect/:/data
       - ./conf/kafka/connectors/confluentinc-kafka-connect-elasticsearch:/usr/share/java/confluentinc-kafka-connect-elasticsearch
     #command: bash -c "tail -f /dev/null"
@@ -109,29 +110,13 @@ services:
     volumes:
       #chmod 777 elasticsearch
       - ./data/elasticsearch:/usr/share/elasticsearch/data
-   kibana:
+  kibana:
     image: kibana:7.12.0
     restart: always
-     ports:
+    ports:
       - 9601:5601
     depends_on:
       - elasticsearch
-  doris-be:
-    image: primetoninc/jdk:1.8
-    ports:
-      - 8040:8040
-      - 8060:8060
-      - 9050:9050
-      - 9060:9060
-    environment:
-      - priority_networks=$IP/28
-    volumes:
-      - ./apps/doris:/doris
-      - ./conf/doris/be.conf:/doris/be/conf/be.conf
-      - ./data/doris/be/storage:/doris/be/storage
-    command: bash -c "/doris/be/bin/start_be.sh"
-    #SET PASSWORD FOR 'root' = PASSWORD('aA123456!');
-    #startup takes a long time
   doris-fe:
     image: primetoninc/jdk:1.8
     ports:
@@ -140,6 +125,7 @@ services:
       - 9020:9020
       - 9030:9030
     environment:
+      - TZ="Asia/Shanghai"
       - priority_networks=$CIDR
     volumes:
       - ./apps/doris:/doris
@@ -147,6 +133,22 @@ services:
       - ./log/doris/fe:/doris/fe/log
       - ./data/doris/fe/doris-meta:/doris/fe/doris-meta
     command: bash -c "/doris/fe/bin/start_fe.sh"
+  doris-be:
+    image: primetoninc/jdk:1.8
+    ports:
+      - 8040:8040
+      - 8060:8060
+      - 9050:9050
+      - 9060:9060
+    environment:
+      - TZ="Asia/Shanghai"
+      - priority_networks=$CIDR
+    volumes:
+      - ./apps/doris:/doris
+      - ./conf/doris/be.conf:/doris/be/conf/be.conf
+      - ./data/doris/be/storage:/doris/be/storage
+      - ./log/doris/be/:/doris/be/log
+    command: bash -c "/doris/be/bin/start_be.sh"
 #doris kafka Routine Load https://cloud.baidu.com/doc/PALO/s/ykizgdgv8
 #doris does not support superset or metabase yet; to be solved later through custom development
 #https://hub.docker.com/r/apache/superset
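After these compose changes, the stack can be smoke-tested from the host; a sketch assuming the port mappings above (8083 for Kafka Connect, 8080 for debezium-ui per the README below, 9601 for kibana, 9030 for doris-fe):

```bash
docker-compose up -d
# Kafka Connect / Debezium REST API answers on the mapped 8083.
curl -s http://localhost:8083/connectors
# The Debezium UI, per the README.
curl -s http://localhost:8080
# Kibana is mapped 9601 -> 5601; /api/status reports overall health.
curl -s http://localhost:9601/api/status
# The doris fe MySQL-protocol port is mapped straight through on 9030.
mysql -h 127.0.0.1 -P 9030 -u root -e "SHOW PROC '/backends';"
```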

@@ -4,20 +4,98 @@
 MySQL is configured with master-slave replication; the example database and its User table have been created
 ## debezium
 Enter the container, cd into the /start directory, and run ./start.sh
 ## debezium-ui
 Check Debezium status: http://localhost:8080
 ## kafka
 Watch the live change messages with kafkacat
 ## kafka-connect
 Enter the container, cd into the /usr/share/java/confluentinc-kafka-connect-elasticsearch directory, and run ./start.sh
 ## kibana
 Check Elasticsearch status: http://localhost:9601
 ## doris
-Enter the mysql container and connect to doris fe
+### Enter the mysql container and connect to doris fe
+mysql -h doris-fe -P 9030 -u root
-Set the root password
+### Set the root password
+SET PASSWORD FOR 'root' = PASSWORD('aA123456!');
-Add doris be
+### Add doris be
+Check the be log for its current IP; when testing, use that IP in place of hostname, or pin the container IP via .env
+ALTER SYSTEM ADD BACKEND "hostname:9050";
+### Remove doris be
+ALTER SYSTEM DROPP BACKEND "hostname:9050";
+### Check be status
+SHOW PROC '/backends';
+### Create the database:
+CREATE DATABASE example;
+### Switch database:
+USE example;
+### Create the table:
-ALTER SYSTEM ADD BACKEND "192.168.100.144:9050";
+CREATE TABLE `User` (
+`Id` char(36) NOT NULL COMMENT 'Id',
+`UserName` varchar(255) NOT NULL COMMENT 'UserName',
+`SecurityStamp` varchar(255) REPLACE NULL COMMENT 'SecurityStamp',
+`PasswordHash` varchar(255) REPLACE NULL COMMENT 'PasswordHash',
+`PasswordConfirmed` tinyint(1) REPLACE NOT NULL COMMENT 'PasswordConfirmed',
+`Email` varchar(255) REPLACE NULL DEFAULT NULL COMMENT 'Email',
+`EmailConfirmed` tinyint(1) REPLACE NOT NULL COMMENT 'EmailConfirmed',
+`PhoneNumber` varchar(255) REPLACE NULL DEFAULT NULL COMMENT 'PhoneNumber',
+`PhoneNumberConfirmed` tinyint(1) REPLACE NOT NULL COMMENT 'PhoneNumberConfirmed',
+`RealName` varchar(255) REPLACE NULL COMMENT 'RealName',
+`IdentityNumber` varchar(255) REPLACE NULL COMMENT 'IdentityNumber',
+`IdentityConfirmed` tinyint(1) REPLACE NOT NULL COMMENT 'IdentityConfirmed',
+`NickName` varchar(255) REPLACE NULL COMMENT 'NickName',
+`Avatar` varchar(255) REPLACE NULL COMMENT 'Avatar',
+`Sex` int(0) REPLACE NULL DEFAULT NULL COMMENT 'Sex',
+`Birthday` datetime REPLACE NULL DEFAULT NULL COMMENT 'Birthday',
+`LockoutEnabled` tinyint(1) REPLACE NOT NULL COMMENT 'LockoutEnabled',
+`AccessFailedCount` int(0) REPLACE NOT NULL COMMENT 'AccessFailedCount',
+`LockoutEnd` datetime REPLACE NULL DEFAULT NULL COMMENT 'LockoutEnd',
+`RowVersion` varchar(255) REPLACE NULL COMMENT 'RowVersion',
+`Created` datetime REPLACE NOT NULL COMMENT 'Created',
+`Modified` datetime REPLACE NULL DEFAULT NULL COMMENT 'Modified',
+`Deleted` datetime REPLACE NULL DEFAULT NULL COMMENT 'Deleted'
+)
+AGGREGATE KEY(Id,UserName)
+DISTRIBUTED BY HASH(Id) BUCKETS 10
+PROPERTIES("replication_num" = "1");
-Check be status
-=======================================================================
-Import from kafka
-SHOW PROC '/backends';
+CREATE ROUTINE LOAD example.job1 on User
+PROPERTIES
+(
+"desired_concurrent_number"="1"
+)
+FROM KAFKA
+(
+"kafka_broker_list"= "kafka:9092",
+"kafka_topic" = "mysql.example.User"
+);
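Once the routine load above is created, the pipeline can be verified end to end; a sketch using only names defined in this commit (topic mysql.example.User, job example.job1):

```bash
# Confirm the routine load job is RUNNING and advancing its offsets.
mysql -h doris-fe -P 9030 -u root -e "SHOW ROUTINE LOAD FOR example.job1;"
# Tail the last few Debezium change events on the source topic.
kafkacat -b kafka:9092 -t mysql.example.User -C -o -5 -e
# Check that rows have landed in the aggregate table.
mysql -h doris-fe -P 9030 -u root -e "SELECT COUNT(*) FROM example.User;"
```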