Deploying assorted middleware with docker-compose
A roundup of docker-compose deployments for various middleware
Nacos standalone + open-source tool site (it-tools)
version: "3"
services:
nacos:
image: nacos/nacos-server:v2.0.4
container_name: nacos
ports:
- 8848:8848
environment:
TZ: Asia/Shanghai
MODE: standalone
PREFER_HOST_MODE: hostname
NACOS_SERVER_IP: 101.126.22.188
volumes:
- /root/docker/nacos/logs/:/home/nacos/logs
it-tools:
container_name: it-tools
restart: unless-stopped
ports:
- '9999:80'
image: 'corentinth/it-tools:latest'
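A quick way to confirm both services are up once docker-compose up -d has run; the readiness endpoint below is available in Nacos 2.x, otherwise just open /nacos in a browser (a minimal sketch using the host IP from the compose file):
docker-compose up -d
# Nacos console: http://101.126.22.188:8848/nacos (default account nacos/nacos)
curl -s http://101.126.22.188:8848/nacos/v1/console/health/readiness
# it-tools is a static site; an HTTP 200 on port 9999 means it is serving
curl -s -o /dev/null -w "%{http_code}\n" http://101.126.22.188:9999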
ELK
ELK deployment notes are also available at https://hulupet.cn/2023/10/31/1698731962206
version: "3"
services:
elasticsearch:
container_name: elasticsearch # container name
restart: always
image: docker.elastic.co/elasticsearch/elasticsearch:7.12.1 # Elasticsearch image
environment:
- ES_JAVA_OPTS=-Xms512m -Xmx512m # JVM options
- discovery.type=single-node # single-node mode
- TAKE_FILE_OWNERSHIP=true # take ownership of mounted files
- bootstrap.memory_lock=true # lock memory
volumes:
- /root/docker/elasticsearch/logs:/usr/share/elasticsearch/logs # Elasticsearch log volume
- /root/docker/elasticsearch/data:/usr/share/elasticsearch/data # Elasticsearch data volume
- /root/docker/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml # Elasticsearch config file volume
- /root/docker/elasticsearch/plugins:/usr/share/elasticsearch/plugins # Elasticsearch plugin volume
privileged: true # privileged mode
networks:
- elk-net # network used
ports:
- "9200:9200" # port mapping
- "9300:9300"
ulimits:
memlock:
soft: -1
hard: -1
logstash:
container_name: logstash # container name
restart: always
image: docker.elastic.co/logstash/logstash:7.12.1 # Logstash image
volumes:
- /root/docker/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml # Logstash config volume
- /root/docker/logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf # Logstash pipeline volume
networks:
- elk-net # network used
ports:
- "5044:5044" # port mapping
- "9600:9600"
kibana:
container_name: kibana # container name
restart: always
image: docker.elastic.co/kibana/kibana:7.12.1 # Kibana image
privileged: true # privileged mode
environment:
- ELASTICSEARCH_HOSTS=http://elasticsearch:9200 # use the service name, not the container name
- I18N_LOCALE=zh-CN # UI locale
volumes:
- /root/docker/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml # Kibana config volume
networks:
- elk-net # network used
ports:
- "5601:5601" # port mapping
networks:
elk-net:
external:
name: elk-net # external network name
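The compose file above assumes the elk-net network and several host-side config files already exist. Below is a minimal preparation sketch; the individual settings are assumptions (the linked post holds the full versions), so adapt them before use.
# Create the external network referenced by the compose file
docker network create elk-net
# Host directories; the Elasticsearch dirs must be writable by uid 1000
mkdir -p /root/docker/elasticsearch/{logs,data,config,plugins} /root/docker/logstash/{config,pipeline} /root/docker/kibana/config
chown -R 1000:1000 /root/docker/elasticsearch
# Minimal single-node elasticsearch.yml (assumed settings)
cat > /root/docker/elasticsearch/config/elasticsearch.yml <<'EOF'
cluster.name: elk
network.host: 0.0.0.0
EOF
# Minimal logstash.yml pointing monitoring at the elasticsearch service
cat > /root/docker/logstash/config/logstash.yml <<'EOF'
http.host: 0.0.0.0
xpack.monitoring.elasticsearch.hosts: ["http://elasticsearch:9200"]
EOF
# Minimal beats-in / elasticsearch-out pipeline (index name is an assumption)
cat > /root/docker/logstash/pipeline/logstash.conf <<'EOF'
input { beats { port => 5044 } }
output { elasticsearch { hosts => ["http://elasticsearch:9200"] index => "app-%{+YYYY.MM.dd}" } }
EOF
# Minimal kibana.yml (the locale is already set via the environment above)
cat > /root/docker/kibana/config/kibana.yml <<'EOF'
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://elasticsearch:9200"]
EOF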
Jenkins
Initial hands-on complete; moving on to JDK 17 study + codeMap development -- 2024/01/23
Docker version (web UI is slow)
Git installation
yum install -y git
Maven installation
mkdir -p /opt/env/maven && cd /opt/env/maven
# Download apache-maven-3.9.6-bin.tar.gz from the official site
https://maven.apache.org/download.cgi
docker-compose file
version: '3'
services: # service collection
docker_jenkins:
user: root # run as root to avoid permission issues
restart: always # restart policy
image: jenkins/jenkins:lts # image used; LTS (long-term support) chosen here
container_name: jenkins # container name
ports: # exposed ports
- 8050:8080 # Jenkins web UI port
- 50000:50000
volumes:
- /usr/maven/apache-maven-3.9.6:/usr/local/apache-maven-3.9.6
- /usr/bin/git:/usr/local/git
- /root/docker/jenkins/jenkins_home/:/var/jenkins_home
- /var/run/docker.sock:/var/run/docker.sock
- /usr/bin/docker:/usr/bin/docker
- /usr/local/bin/docker-compose:/usr/local/bin/docker-compose
- /etc/localtime:/etc/localtime
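After the first start, the one-time unlock password can be read from the mounted jenkins_home; the Maven, Git and Docker paths mounted above are then registered under Manage Jenkins → Tools. A quick sketch:
docker-compose up -d
# one-time unlock password for the setup wizard on http://<host-ip>:8050
docker exec jenkins cat /var/jenkins_home/secrets/initialAdminPassword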
Native installation (web UI responds quickly)
Git installation
yum install -y git
Maven installation
mkdir -p /opt/env/maven && cd /opt/env/maven
# Download apache-maven-3.9.6-bin.tar.gz from the official site
https://maven.apache.org/download.cgi
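A minimal sketch of the download and extraction; the archive URL is an assumption (older releases live under archive.apache.org):
cd /opt/env/maven
wget https://archive.apache.org/dist/maven/maven-3/3.9.6/binaries/apache-maven-3.9.6-bin.tar.gz --no-check-certificate
tar -zxvf apache-maven-3.9.6-bin.tar.gz
# note: the profile below exports MAVEN_HOME=/usr/maven/apache-maven-3.9.6, so either move the extracted folder there or adjust the export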
Tomcat installation
# Download Tomcat from the Tsinghua mirror
wget https://mirrors.tuna.tsinghua.edu.cn/apache/tomcat/tomcat-9/v9.0.83/bin/apache-tomcat-9.0.83.tar.gz --no-check-certificate
# Extract
tar -zxvf apache-tomcat-9.0.83.tar.gz
vim /etc/profile
export CATALINA_HOME=/opt/tomcat/apache-tomcat-9.0.83
export PATH=${CATALINA_HOME}/bin:$PATH
# Reload so the change takes effect
source /etc/profile
vim /etc/systemd/system/tomcat.service
[Unit]
Description=Tomcat Server
After=syslog.target network.target
[Service]
Type=forking
Environment="JAVA_HOME=/opt/jdk/jdk-17.0.10"
ExecStart=/opt/tomcat/apache-tomcat-9.0.83/bin/startup.sh
ExecStop=/opt/tomcat/apache-tomcat-9.0.83/bin/shutdown.sh
ExecReload=/bin/sh -c '/opt/tomcat/apache-tomcat-9.0.83/bin/shutdown.sh && /opt/tomcat/apache-tomcat-9.0.83/bin/startup.sh'
KillMode=process
[Install]
WantedBy=multi-user.target
# Set permissions on tomcat.service:
chmod 755 /etc/systemd/system/tomcat.service
# Reload systemd units:
systemctl daemon-reload
# Enable the service at boot:
systemctl enable tomcat
# Check current status
systemctl status tomcat
# Start
systemctl start tomcat
JDK 17 installation
# Download the JDK 17 tar.gz from the official site, then extract it
# Configure environment variables
vim /etc/profile
export JAVA_HOME=/opt/jdk/jdk-17.0.10
export MAVEN_HOME=/usr/maven/apache-maven-3.9.6
export CLASSPATH=.:${JAVA_HOME}/lib/tools.jar:${JAVA_HOME}/lib/dt.jar
export CATALINA_HOME=/opt/tomcat/apache-tomcat-9.0.83
export PATH=$PATH:${JAVA_HOME}/bin:${MAVEN_HOME}/bin:${CATALINA_HOME}/bin
# Apply the changes
source /etc/profile
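A minimal sketch of the download and extraction step; the Adoptium API URL is an assumption (any JDK 17 tarball laid out under /opt/jdk works the same way), and the extracted directory name must match the JAVA_HOME exported above:
mkdir -p /opt/jdk && cd /opt/jdk
# Temurin JDK 17 download without login (assumed URL; an Oracle tarball also works)
wget -O jdk-17.tar.gz "https://api.adoptium.net/v3/binary/latest/17/ga/linux/x64/jdk/hotspot/normal/eclipse"
tar -zxvf jdk-17.tar.gz
# rename/symlink the extracted folder so it matches JAVA_HOME, e.g. /opt/jdk/jdk-17.0.10
java -version   # verify after `source /etc/profile`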
Jenkins installation
# Download the Jenkins war from the official site
# Upload the Jenkins war with rz
cd /opt/tomcat/apache-tomcat-9.0.83/webapps
# Restart Tomcat and it deploys automatically
systemctl restart tomcat
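If the server has outbound network access, fetching the war straight into webapps avoids the rz upload; the stable-war URL below is an assumption to adjust as needed:
cd /opt/tomcat/apache-tomcat-9.0.83/webapps
wget https://get.jenkins.io/war-stable/latest/jenkins.war
systemctl restart tomcat
# Jenkins then answers at http://<host-ip>:8080/jenkins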
RocketMQ cluster
version: '3.3'
services:
rmqnamesrv-a:
image: apache/rocketmq:4.9.4
container_name: rmqnamesrv-a
ports:
- 9876:9876
volumes:
- /rocketmq/logs/nameserver-a:/home/rocketmq/logs
- /rocketmq/broker-a/broker-a.conf:/home/rocketmq/rocketmq-4.9.4/conf/broker.conf
command: sh mqnamesrv
networks:
rmq:
aliases:
- rmqnamesrv-a
rmqnamesrv-b:
image: apache/rocketmq:4.9.4
container_name: rmqnamesrv-b
ports:
- 9877:9876
volumes:
- /rocketmq/logs/nameserver-b:/home/rocketmq/logs
- /rocketmq/broker-b/broker-b.conf:/home/rocketmq/rocketmq-4.9.4/conf/broker.conf
command: sh mqnamesrv
networks:
rmq:
aliases:
- rmqnamesrv-b
rmqbroker-a:
image: apache/rocketmq:4.9.4
container_name: rmqbroker-a
ports:
- 10911:10911
volumes:
- /rocketmq/logs/broker-a/logs:/home/rocketmq/logs
- /rocketmq/store/broker-a/store:/home/rocketmq/store
- /rocketmq/broker-a/broker-a.conf:/home/rocketmq/rocketmq-4.9.4/conf/broker.conf
environment:
TZ: Asia/Shanghai
NAMESRV_ADDR: "rmqnamesrv-a:9876"
JAVA_OPTS: " -Duser.home=/opt"
JAVA_OPT_EXT: "-server -Xms256m -Xmx256m -Xmn256m"
command: sh mqbroker -c /home/rocketmq/rocketmq-4.9.4/conf/broker.conf
links:
- rmqnamesrv-a:rmqnamesrv-a
- rmqnamesrv-b:rmqnamesrv-b
networks:
rmq:
aliases:
- rmqbroker-a
rmqbroker-b:
image: apache/rocketmq:4.9.4
container_name: rmqbroker-b
ports:
- 10912:10912
volumes:
- /rocketmq/logs/broker-b/logs:/home/rocketmq/logs
- /rocketmq/store/broker-b/store:/home/rocketmq/store
- /rocketmq/broker-b/broker-b.conf:/home/rocketmq/rocketmq-4.9.4/conf/broker.conf
environment:
TZ: Asia/Shanghai
NAMESRV_ADDR: "rmqnamesrv-b:9877"
JAVA_OPTS: " -Duser.home=/opt"
JAVA_OPT_EXT: "-server -Xms256m -Xmx256m -Xmn256m"
command: sh mqbroker -c /home/rocketmq/rocketmq-4.9.4/conf/broker.conf
links:
- rmqnamesrv-a:rmqnamesrv-a
- rmqnamesrv-b:rmqnamesrv-b
networks:
rmq:
aliases:
- rmqbroker-b
rmqconsole:
image: apacherocketmq/rocketmq-dashboard
container_name: rmqconsole
ports:
- 8087:8080
environment:
JAVA_OPTS: -Drocketmq.namesrv.addr=rmqnamesrv-a:9876;rmqnamesrv-b:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false -Drocketmq.config.accessKey=rocketmq2 -Drocketmq.config.secretKey=12345678
volumes:
- /rocketmq/console-ng/data:/tmp/rocketmq-console/data
networks:
rmq:
aliases:
- rmqconsole
networks:
rmq:
name: rmq
driver: bridge
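The broker containers above mount /rocketmq/broker-a/broker-a.conf and /rocketmq/broker-b/broker-b.conf from the host, but their contents are not shown. A minimal sketch for broker-a follows; the values are assumptions to adapt, and broker-b is the same with brokerName=broker-b, listenPort=10912 to match its port mapping, and its own directories:
mkdir -p /rocketmq/logs/broker-a /rocketmq/store/broker-a /rocketmq/broker-a
cat > /rocketmq/broker-a/broker-a.conf <<'EOF'
brokerClusterName = DefaultCluster
brokerName = broker-a
brokerId = 0
# IP that clients outside the docker network use to reach this broker (assumed host IP)
brokerIP1 = 101.126.22.188
listenPort = 10911
namesrvAddr = rmqnamesrv-a:9876;rmqnamesrv-b:9876
deleteWhen = 04
fileReservedTime = 48
brokerRole = ASYNC_MASTER
flushDiskType = ASYNC_FLUSH
EOF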
Maven private repository (Nexus)
version: "3"
services:
nexus:
image: sonatype/nexus3
container_name: nexus3
restart: always
environment:
- TZ=Asia/Shanghai
ports:
- 8068:8081
volumes:
- /root/docker/nexus-data:/nexus-data
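Nexus takes a minute or two to come up; the first-login admin password is generated inside the data volume. A quick sketch using the paths from the compose file above (the nexus3 image runs as uid 200, hence the chown):
mkdir -p /root/docker/nexus-data && chown -R 200:200 /root/docker/nexus-data
docker-compose up -d
# initial admin password for the web UI on http://<host-ip>:8068
docker exec nexus3 cat /nexus-data/admin.password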
ZooKeeper cluster
version: '3'
# Give the zk cluster its own network, named zk-net
networks:
zk-net:
name: zk-net
# zk cluster configuration
# every child entry under services corresponds to one zk node's docker container
services:
zk1:
# docker image used by the container
image: zookeeper
hostname: zk1
container_name: zk1
# port mapping between the container and the host
ports:
- 2181:2181
- 8081:8080
# container environment variables
environment:
# id of this zk instance
ZOO_MY_ID: 1
# list of all machines/ports in the zk ensemble
ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zk2:2888:3888;2181 server.3=zk3:2888:3888;2181
# mount container paths onto the host so data is shared between host and container
volumes:
- /root/docker/zookeeper/zk1/data:/data
- /root/docker/zookeeper/zk1/datalog:/datalog
# join this container to the isolated zk-net network
networks:
- zk-net
zk2:
image: zookeeper
hostname: zk2
container_name: zk2
ports:
- 2182:2181
- 8082:8080
environment:
ZOO_MY_ID: 2
ZOO_SERVERS: server.1=zk1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zk3:2888:3888;2181
volumes:
- /root/docker/zookeeper/zk2/data:/data
- /root/docker/zookeeper/zk2/datalog:/datalog
networks:
- zk-net
zk3:
image: zookeeper
hostname: zk3
container_name: zk3
ports:
- 2183:2181
- 8083:8080
environment:
ZOO_MY_ID: 3
ZOO_SERVERS: server.1=zk1:2888:3888;2181 server.2=zk2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
volumes:
- /root/docker/zookeeper/zk3/data:/data
- /root/docker/zookeeper/zk3/datalog:/datalog
networks:
- zk-net
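Once the three containers are up, each node's role can be checked with zkServer.sh, which should be on the PATH in the official image; the admin server mapped to ports 8081-8083 also answers at /commands/stat. A quick sketch:
docker-compose up -d
# expect one "Mode: leader" and two "Mode: follower"
docker exec -it zk1 zkServer.sh status
docker exec -it zk2 zkServer.sh status
docker exec -it zk3 zkServer.sh status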
MinIO (2021 release, minimal)
version: '3'
services:
minio:
image: minio/minio:RELEASE.2021-03-04T00-53-13Z
container_name: minio # container name (used for all later control)
ports:
- 9000:9000
environment:
MINIO_ROOT_USER: your-username # admin console username
MINIO_ROOT_PASSWORD: your-password # admin console password, at least 8 characters
volumes:
- /root/docker/minio/data:/data # map the host data directory to /data in the container
- /root/docker/minio/config:/root/.minio/ # map the config directory
command: server /data # data directory inside the container: /data
privileged: true
restart: always # restart policy
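The host directories should exist before the first start; this 2021 release serves both the API and the web console on port 9000. A quick sketch:
mkdir -p /root/docker/minio/{data,config}
docker-compose up -d
# liveness probe; expect HTTP 200
curl -s -o /dev/null -w "%{http_code}\n" http://127.0.0.1:9000/minio/health/live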
MySQL standalone
version: '3'
services:
mysql8.0:
image: mysql:8.0.21 # image name
container_name: mysql8.0 # container name (used for all later control)
restart: always # restart policy
environment:
TZ: Asia/Shanghai # Shanghai timezone
MYSQL_ROOT_PASSWORD: root # root password
ports: # port mapping
- 3306:3306
volumes:
- /root/docker/mysql/data/:/var/lib/mysql/ # data mount
- /root/docker/mysql/conf/:/etc/mysql/conf.d/ # config mount (if it does not take effect, put the settings in my.cnf)
- /root/docker/mysql/init/:/docker-entrypoint-initdb.d/ # init script mount
command: # switch MySQL 8.0 back to the legacy authentication plugin (the 8.0 default change can make password auth fail for older clients)
--default-authentication-plugin=mysql_native_password
--character-set-server=utf8mb4
--collation-server=utf8mb4_general_ci
--explicit_defaults_for_timestamp=true
--lower_case_table_names=1
--sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
mkdir -p /root/docker/mysql/{data,conf,init}
######### [Follow-up steps after docker-compose is up] ######################
# Log in to the MySQL container
docker exec -it mysql8.0 bash
mysql -u root -p
show databases;
# Allow the root user to connect remotely
# Password used below: Ab123@wcj-root
GRANT ALL ON *.* TO 'root'@'%';
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'Ab123@wcj-root';
# Flush privileges
FLUSH PRIVILEGES;
# Remote login now works; remember to open the firewall port
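Remote access can then be verified from another machine (3306 must be open in the firewall / security group); the IP below is just the example host used elsewhere in this post:
mysql -h 101.126.22.188 -P 3306 -u root -p
# or, without a locally installed client, reuse the image:
docker run --rm -it mysql:8.0.21 mysql -h 101.126.22.188 -P 3306 -u root -p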
⭐ MySQL read/write splitting (master-slave) cluster
Reference: "DockerCompose安装Mysql配置读写分离" (CSDN blog)
version: '3.0'
services:
master_mysql:
image: mysql:8.0.21
restart: always
container_name: master_mysql
privileged: true
ports:
- 3306:3306
environment:
MYSQL_ROOT_PASSWORD: 123456
TZ: Asia/Shanghai
volumes:
- /root/docker/mysql/master_mysql/data:/var/lib/mysql
- /root/docker/mysql/master_mysql/conf:/etc/mysql/conf.d
command: # switch MySQL 8.0 back to the legacy authentication plugin (the 8.0 default change can make password auth fail for older clients)
--default-authentication-plugin=mysql_native_password
--character-set-server=utf8mb4
--collation-server=utf8mb4_general_ci
--explicit_defaults_for_timestamp=true
--lower_case_table_names=1
--sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
slave_mysql:
image: mysql:8.0.21
restart: always
container_name: slave_mysql
privileged: true
ports:
- 3307:3306
environment:
MYSQL_ROOT_PASSWORD: 123456
TZ: Asia/Shanghai
volumes:
- /root/docker/mysql/slave_mysql/data:/var/lib/mysql
- /root/docker/mysql/slave_mysql/conf:/etc/mysql/conf.d
command: # switch MySQL 8.0 back to the legacy authentication plugin (the 8.0 default change can make password auth fail for older clients)
--default-authentication-plugin=mysql_native_password
--character-set-server=utf8mb4
--collation-server=utf8mb4_general_ci
--explicit_defaults_for_timestamp=true
--lower_case_table_names=1
--sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
vim /root/docker/mysql/slave_mysql/conf/my.cnf
# ========== Slave (replica) configuration
[mysqld]
# must be unique within the LAN
server-id=200
## enable binary logging; the log name is arbitrary (key setting)
log-bin=mysql-bin
## read-only; if this is not set, the slave is both readable and writable
read_only= 1
#skip-grant-tables
vim /root/docker/mysql/master_mysql/conf/my.cnf
# ========== Master configuration
[mysqld]
## must be unique within the LAN
server-id=100
## enable binary logging; the log name is arbitrary (key setting)
log-bin=mysql-bin
#skip-grant-tables
# If Navicat cannot connect after the containers start and the initial password does not seem to take effect, adjust the corresponding my.cnf.
# Uncommenting skip-grant-tables lets `mysql -u root -p` log in with an empty password (just press Enter).
# Log in to the MySQL container
docker exec -it master_mysql bash
mysql -u root -p
# Allow the root user to connect remotely
GRANT ALL ON *.* TO 'root'@'%';
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'Ab123@wcj-root';
# Flush privileges
FLUSH PRIVILEGES;
# Remote login now works; remember to open the firewall port
# (apply the same settings on the slave)
# Run on the master and note down the result
show master status;
# Run on the slave, filling in the File and Position values from the master status above
change master to
master_host='192.168.101.65',
master_user='root',
master_password='Ab123@wcj-root',
MASTER_LOG_FILE='mysql-bin.000004',
MASTER_LOG_POS=814;
# Start replication on the slave
start slave;
# Check replication status
show slave status;
# Test: create a database on the master, then check the slave; the newly created database is replicated over
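The fields that matter in the slave status output are the IO and SQL threads plus the replication lag; a quick check from the host (use whichever root password is currently set):
docker exec -it slave_mysql mysql -uroot -p123456 -e "show slave status\G" | grep -E "Slave_IO_Running|Slave_SQL_Running|Seconds_Behind_Master"
# both *_Running fields should be Yes and Seconds_Behind_Master close to 0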
Redis standalone
mkdir -p /root/docker/redis/{conf,data} && cd /root/docker/redis/conf/
version: '3'
services:
redis:
image: redis:5.0.8 # image name
container_name: redis # container name
restart: always # restart policy
ports: # port mapping
- 6379:6379
environment:
TZ: Asia/Shanghai # environment variables: Shanghai timezone, UTF-8 locale
LANG: en_US.UTF-8
volumes:
- /root/docker/redis/conf/redis.conf:/etc/redis/redis.conf # config file; mounted locally so the container's redis.conf can be managed from the host
- /root/docker/redis/data:/data:rw # data files
command:
redis-server /etc/redis/redis.conf
deploy:
resources:
limits:
memory: 500M
reservations:
memory: 200M
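The compose file mounts /root/docker/redis/conf/redis.conf, which has to exist before the first start. A minimal sketch of its contents (the password is an assumption; change it):
cat > /root/docker/redis/conf/redis.conf <<'EOF'
# listen on all interfaces inside the container; the port is mapped by compose
bind 0.0.0.0
port 6379
# persist data under /data (mounted to the host above)
dir /data
appendonly yes
# access password (assumed value)
requirepass 123456
EOF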
⭐ Redis cluster (three masters, three replicas)
Reference: "基于docker-compose搭建redis集群" (CSDN blog)
version: '3'
services:
redis1:
image: daocloud.io/library/redis:6.0.4
container_name: redis-1
restart: always
environment:
- REDISCLI_AUTH=123456
ports:
- 6379:6379
- 16379:16379
networks:
- redis-net
volumes:
- /root/docker/redis-cluster/redis-1/redis.conf:/usr/local/etc/redis/redis.conf
command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
redis2:
image: daocloud.io/library/redis:6.0.4
container_name: redis-2
restart: always
environment:
- REDISCLI_AUTH=123456
ports:
- 6380:6380
- 16380:16380
networks:
- redis-net
volumes:
- /root/docker/redis-cluster/redis-2/redis.conf:/usr/local/etc/redis/redis.conf
command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
redis3:
image: daocloud.io/library/redis:6.0.4
container_name: redis-3
restart: always
environment:
- REDISCLI_AUTH=123456
ports:
- 6381:6381
- 16381:16381
networks:
- redis-net
volumes:
- /root/docker/redis-cluster/redis-3/redis.conf:/usr/local/etc/redis/redis.conf
command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
redis4:
image: daocloud.io/library/redis:6.0.4
container_name: redis-4
restart: always
environment:
- REDISCLI_AUTH=123456
ports:
- 6382:6382
- 16382:16382
networks:
- redis-net
volumes:
- /root/docker/redis-cluster/redis-4/redis.conf:/usr/local/etc/redis/redis.conf
command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
redis5:
image: daocloud.io/library/redis:6.0.4
container_name: redis-5
restart: always
environment:
- REDISCLI_AUTH=123456
ports:
- 6383:6383
- 16383:16383
networks:
- redis-net
volumes:
- /root/docker/redis-cluster/redis-5/redis.conf:/usr/local/etc/redis/redis.conf
command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
redis6:
image: daocloud.io/library/redis:6.0.4
container_name: redis-6
restart: always
environment:
- REDISCLI_AUTH=123456
ports:
- 6384:6384
- 16384:16384
networks:
- redis-net
volumes:
- /root/docker/redis-cluster/redis-6/redis.conf:/usr/local/etc/redis/redis.conf
command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
networks:
redis-net:
name: redis-net
mkdir -p /root/docker/redis-cluster/{redis-1,redis-2,redis-3,redis-4,redis-5,redis-6}
# Each redis node gets its own redis.conf
vim /root/docker/redis-cluster/redis-1/redis.conf
vim /root/docker/redis-cluster/redis-2/redis.conf
vim /root/docker/redis-cluster/redis-3/redis.conf
vim /root/docker/redis-cluster/redis-4/redis.conf
vim /root/docker/redis-cluster/redis-5/redis.conf
vim /root/docker/redis-cluster/redis-6/redis.conf
# enable cluster mode
cluster-enabled yes
# cluster config file
cluster-config-file nodes.conf
# how long a node may be unresponsive before it is considered lost
cluster-node-timeout 5000
appendonly yes
# redis listen port (must match each node's mapped port: 6379 for redis-1, 6380 for redis-2, ... 6384 for redis-6)
port 6379
masterauth 123456
requirepass 123456
# Enter a container and run:
docker exec -it redis-1 bash
redis-cli -c -h 192.168.1.65 -p 6379 -a 123456
flushdb
cluster reset
exit
redis-cli --cluster create 192.168.1.65:6379 192.168.1.65:6380 192.168.1.65:6381 192.168.1.65:6382 192.168.1.65:6383 192.168.1.65:6384 --cluster-replicas 1
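After the create command finishes, the slot assignment and master/replica pairing can be checked from any node (using the same host IP as above):
redis-cli -c -h 192.168.1.65 -p 6379 -a 123456 cluster info   # expect cluster_state:ok
redis-cli -c -h 192.168.1.65 -p 6379 -a 123456 cluster nodes  # lists 3 masters and 3 replicas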
Halo
An open-source blog platform
version: "3"
services:
halo:
image: registry.fit2cloud.com/halo/halo:2.16
container_name: halo
restart: on-failure:3
ports:
- 9090:8090
volumes:
- ./halo2:/root/.halo2
command:
- --spring.r2dbc.url=r2dbc:pool:mysql://<your-database-ip>:3306/halo
- --spring.r2dbc.username=<username>
- --spring.r2dbc.password=<password>
- --spring.sql.init.platform=mysql
- --halo.external-url=http://hulupet.cn/
- --server.port=8090
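Halo does not create the schema referenced in spring.r2dbc.url, so the halo database has to exist first. A minimal sketch against the MySQL container from the earlier sections (container name and credentials are placeholders to adjust):
docker exec -it mysql8.0 mysql -uroot -p -e "CREATE DATABASE IF NOT EXISTS halo CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci;"
docker-compose up -d
# then open http://<host-ip>:9090/console to finish the setup wizard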