Docker Installation

Official documentation: https://docs.docker.com/engine/install/centos/

Image registry: https://hub.docker.com/

Uninstall old versions

sudo yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine

Configure the Docker yum repository

sudo yum install -y yum-utils
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

Install the latest Docker

docker-ce: the Docker engine
docker-ce-cli: the command-line client for the Docker engine
containerd.io: the container runtime
docker-buildx-plugin: the build plugin (Buildx)
docker-compose-plugin: the Docker Compose plugin for managing multi-container applications

sudo yum install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

Start Docker

# Start Docker now (without enabling, it must be started again after each reboot)
sudo systemctl start docker
# Enable Docker to start on boot
sudo systemctl enable docker
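
To confirm the daemon is running and check the installed versions:

# Check the service status
sudo systemctl status docker
# Show client and server version information
docker version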

Configure registry mirrors

sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://mirror.ccs.tencentyun.com","https://82m9ar63.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
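
To confirm the mirror configuration was picked up (the configured mirrors should appear under "Registry Mirrors" in the output):

docker info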

Common commands

# List running containers
docker ps
# List all containers, including stopped ones
docker ps -a
# Search for an image
docker search nginx
# Pull an image
docker pull nginx
# Pull a specific image version
docker pull nginx:1.26.0
# List all local images
docker images
# Remove an image by ID
docker rmi e784f4560448


# Run a new container
docker run nginx
# Stop a container (keen_blackwell is the container name)
docker stop keen_blackwell
# Start a container (592 is the container ID shortened to its first three characters)
docker start 592
# Restart a container
docker restart 592
# Show a container's resource usage
docker stats 592
# Show a container's logs
docker logs 592
# Remove a container
docker rm 592
# Force-remove a running container
docker rm -f 592
# Run a container in the background: -d detaches, --name sets the name (a random name is assigned if omitted)
docker run -d --name mynginx nginx
# Run detached and publish a port: -p maps host port 88 to container port 80
# (container port 80 can be reused by other containers, but host port 88 cannot)
docker run -d --name mynginx -p 88:80 nginx
# Open a shell inside the container: -it for interactive mode, /bin/bash as the shell
docker exec -it mynginx /bin/bash
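
As a setup for the commit below, one option is to edit nginx's default page from inside that shell first (a minimal sketch; the path is nginx's default web root):

# Inside the container shell opened above: overwrite the default page
echo "hello from mynginx" > /usr/share/nginx/html/index.html
# Leave the container
exit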



# Commit container changes as a new image: -a author, -c apply a Dockerfile instruction, -m commit message, -p pause the container while committing
docker commit -m "update index.html" mynginx mynginx:v1.0
# Save an image to a tar file
docker save -o mynginx.tar mynginx:v1.0
# Remove multiple images
docker rmi bde7d154a67f 94543a6c1aef e784f4560448
# Load an image from a tar file
docker load -i mynginx.tar
# Run a container from the image
docker run mynginx:v1.0

Log in to Docker Hub


Push an image

# leifengyang is the Docker Hub username
# Log in to Docker Hub
docker login
# Re-tag the image with the username prefix
docker tag mynginx:v1.0 leifengyang/mynginx:v1.0
docker tag mynginx:v1.0 leifengyang/mynginx:latest
# Push the image
docker push leifengyang/mynginx:v1.0
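
After the push, the image can be pulled from Docker Hub on any machine, for example:

docker pull leifengyang/mynginx:v1.0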


Bind mounts and volumes

Once a container is running, making changes requires going inside it, which is cumbersome. The solution is to mount host directories or volumes.
● Bind mount: -v /app/nghtml:/usr/share/nginx/html
● Volume mount: -v ngconf:/etc/nginx  # ngconf is the volume name

Bind mount: the host directory is authoritative; if a file does not exist on the host side, the container sees nothing there, even if the image originally had content.
Volume mount: the volume is created automatically and initialized with the container's existing files, so the two sides stay in sync.

# /app/nghtml on the host is mapped to /usr/share/nginx/html inside the container
# /app/nghtml is created automatically if it does not exist; its content is authoritative
docker run -d -p 99:80 \
-v /app/nghtml:/usr/share/nginx/html \
-v ngconf:/etc/nginx \
--name app03 \
nginx
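
A quick way to verify the bind mount (a minimal sketch, assuming app03 above is running and /app/nghtml was created):

# Write a page on the host side, then request it through the published port
echo "hello from the host" > /app/nghtml/index.html
curl http://localhost:99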

Volume location

Default location

/var/lib/docker/volumes/<volume-name>
# List all volumes
docker volume ls
# Create a volume (it is also placed in the default location)
docker volume create <volume-name>
# Show detailed information about a volume
docker volume inspect <volume-name>
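
For example, the data of the ngconf volume used earlier lives under the default location (path assumes Docker's default data root):

# Inspect the volume's files on the host
sudo ls /var/lib/docker/volumes/ngconf/_data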


Networking

app1 accessing app2, option 1

# Enter app1
docker exec -it app1 bash
# ip is the host IP, 99 is the published host port
curl http://ip:99


app1 accessing app2, option 2

Docker provides a default mechanism: every container joins the default bridge network docker0 when it starts (docker0 is created when Docker is installed).

# You should see a network interface named docker0
ip a
# Inspect the container's details (including its IP)
docker container inspect app1

Each container gets its own IP on this internal network.
Access it again

# Enter app1
docker exec -it app1 bash
# This IP and port are the container-internal ones
curl http://172.17.0.3:80

Docker assigns each container a unique IP, so containers can reach each other via container IP + container port. However, the IP may change for various reasons, so a hostname-based approach is preferable.

Custom networks

The default docker0 bridge does not resolve container names as hostnames.
On a custom network, the container name becomes a stable hostname.

# Create a custom network
docker network create mynet
# List networks
docker network ls


app1 accessing app2 again

# Enter app1
docker exec -it app1 bash
# The port is the container-internal port; app2 is the container name
curl http://app2:80
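
The run commands for app1 and app2 are not shown above; a minimal sketch, assuming both are plain nginx containers joined to mynet:

# Hypothetical setup: attach both containers to the custom network at startup
docker run -d --name app1 --network mynet nginx
docker run -d --name app2 --network mynet nginx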

Redis master-replica cluster

Use the bitnami/redis image from Docker Hub.
Find the Configuration section in its documentation.
Customize its behavior through the supported environment variables.

# Custom network: containers can reach each other by container name
docker network create mynet
# Master node: bitnami/redis is the image, -e sets environment variables, -v mounts a directory, --network joins the network
docker run -d -p 6379:6379 \
-v /app/rd1:/bitnami/redis/data \
-e REDIS_REPLICATION_MODE=master \
-e REDIS_PASSWORD=123456 \
--network mynet --name redis01 \
bitnami/redis

# Replica node
docker run -d -p 6380:6379 \
-v /app/rd2:/bitnami/redis/data \
-e REDIS_REPLICATION_MODE=slave \
-e REDIS_MASTER_HOST=redis01 \
-e REDIS_MASTER_PORT_NUMBER=6379 \
-e REDIS_MASTER_PASSWORD=123456 \
-e REDIS_PASSWORD=123456 \
--network mynet --name redis02 \
bitnami/redis
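
To verify that replication is working, one quick check (assuming the passwords above) is to query the master's replication info:

# Should report role:master and one connected replica
docker exec -it redis01 redis-cli -a 123456 info replication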


Start MySQL

docker run -d -p 3306:3306 \
-v /app/myconf:/etc/mysql/conf.d \
-v /app/mydata:/var/lib/mysql \
-e MYSQL_ROOT_PASSWORD=123456 \
mysql:8.0.37-debian
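
The run command above does not name the container; a minimal check (a sketch, using the mysql client bundled in the image):

# Find the container ID, then open a MySQL shell inside it
docker ps
docker exec -it <container-id> mysql -uroot -p123456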

Docker Compose

Manage a group of containers in one step.

Imperative setup (docker run commands)

# Create the network
docker network create blog

# Start MySQL; --restart always restarts the container when Docker starts
docker run -d -p 3306:3306 \
-e MYSQL_ROOT_PASSWORD=123456 \
-e MYSQL_DATABASE=wordpress \
-v mysql-data:/var/lib/mysql \
-v /app/myconf:/etc/mysql/conf.d \
--restart always --name mysql \
--network blog \
mysql:8.0

# Start WordPress
docker run -d -p 8080:80 \
-e WORDPRESS_DB_HOST=mysql \
-e WORDPRESS_DB_USER=root \
-e WORDPRESS_DB_PASSWORD=123456 \
-e WORDPRESS_DB_NAME=wordpress \
-v wordpress:/var/www/html \
--restart always --name wordpress-app \
--network blog \
wordpress:latest

compose.yaml

Writing compose.yaml

name: myblog
services:
  mysql:
    # Set the container name
    container_name: mysql
    image: mysql:8.0
    ports:
      - "3306:3306"
    environment:
      - MYSQL_ROOT_PASSWORD=123456
      - MYSQL_DATABASE=wordpress
    volumes:
      - mysql-data:/var/lib/mysql
      - /app/myconf:/etc/mysql/conf.d
    restart: always
    networks:
      - blog

  wordpress:
    image: wordpress
    ports:
      - "8080:80"
    environment:
      WORDPRESS_DB_HOST: mysql
      WORDPRESS_DB_USER: root
      WORDPRESS_DB_PASSWORD: 123456
      WORDPRESS_DB_NAME: wordpress
    volumes:
      - wordpress:/var/www/html
    restart: always
    networks:
      - blog
    # Startup dependency (effectively controls the start order)
    depends_on:
      - mysql

volumes:
  mysql-data:
  wordpress:

networks:
  blog:

Reference: https://docs.docker.com/compose/compose-file/05-services/#image
Start with compose.yaml
If you modify the Compose file and run up again, Compose works out what changed and only recreates the modified services.

# Without -f, the default file is compose.yaml (or compose.yml) in the current directory
docker compose up -d
# Or specify the file explicitly with -f
docker compose -f compose.yml up -d
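
After starting, the state and logs of the services can be checked, for example:

# Show the state of the services defined in the Compose file
docker compose -f compose.yml ps
# Follow the logs of all services
docker compose -f compose.yml logs -f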

Tear down
By default, even after the containers are brought down, the mounted volumes are not removed, which is the safer behavior.

# Removes the containers but not their volumes
docker compose -f compose.yml down
# Remove the containers, their volumes (-v), and their images; --rmi takes local or all (all removes every image used)
docker compose -f compose.yml down --rmi all -v
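
A quick way to confirm what was kept or removed (the named volumes mysql-data and wordpress should still be listed after a plain down):

docker volume ls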

Dockerfile

Custom image instruction reference: https://docs.docker.com/reference/dockerfile/

Build an image

Create a Dockerfile (the current directory contains an app.jar):

FROM openjdk:17
LABEL author=dudu
COPY app.jar /app.jar
EXPOSE 8080
ENTRYPOINT ["java","-jar","/app.jar"]

Build

# The trailing . is the build context (the current directory)
docker build -f Dockerfile -t myjavaapp:v1.0 .
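
To try the resulting image (a sketch, assuming the application listens on 8080 as declared by EXPOSE):

# Run the freshly built image and publish its port
docker run -d --name myapp -p 8080:8080 myjavaapp:v1.0
# Follow the application logs
docker logs -f myapp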


Image layers

Each instruction in the Dockerfile produces a layer; the changes caused by that instruction are stored in that layer. Layers are shared between images, which reduces disk usage.
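
The layers of an image, and the instruction that created each one, can be inspected, for example:

# One row per layer, newest first
docker history myjavaapp:v1.0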

Start all middleware with one command


# Disable memory paging and swapping (for performance)
sudo swapoff -a

# Edit the sysctl config file
sudo vi /etc/sysctl.conf
# Add a line to define the desired value
# or change the value if the key exists,
# and then save your changes.
vm.max_map_count=262144
# Reload the kernel parameters using sysctl
sudo sysctl -p

# Verify that the change was applied by checking the value
cat /proc/sys/vm/max_map_count

Write the compose.yml file

EXTERNAL://119.45.147.122:9094
In the file below, replace 119.45.147.122 in the kafka service with your own server's IP.
Every container mounts /etc/localtime read-only, so container time stays in sync with the Linux host.

name: devsoft
services:
  redis:
    image: bitnami/redis:latest
    restart: always
    container_name: redis
    environment:
      - REDIS_PASSWORD=123456
    ports:
      - '6379:6379'
    volumes:
      - redis-data:/bitnami/redis/data
      - redis-conf:/opt/bitnami/redis/mounted-etc
      - /etc/localtime:/etc/localtime:ro
  mysql:
    image: mysql:8.0.31
    restart: always
    container_name: mysql
    environment:
      - MYSQL_ROOT_PASSWORD=123456
    ports:
      - '3306:3306'
      - '33060:33060'
    volumes:
      - mysql-conf:/etc/mysql/conf.d
      - mysql-data:/var/lib/mysql
      - /etc/localtime:/etc/localtime:ro
  rabbit:
    image: rabbitmq:3-management
    restart: always
    container_name: rabbitmq
    ports:
      - "5672:5672"
      - "15672:15672"
    environment:
      - RABBITMQ_DEFAULT_USER=rabbit
      - RABBITMQ_DEFAULT_PASS=rabbit
      - RABBITMQ_DEFAULT_VHOST=dev
    volumes:
      - rabbit-data:/var/lib/rabbitmq
      - rabbit-app:/etc/rabbitmq
      - /etc/localtime:/etc/localtime:ro
  opensearch-node1:
    image: opensearchproject/opensearch:2.13.0
    container_name: opensearch-node1
    environment:
      - cluster.name=opensearch-cluster # Name the cluster
      - node.name=opensearch-node1 # Name the node that will run in this container
      - discovery.seed_hosts=opensearch-node1,opensearch-node2 # Nodes to look for when discovering the cluster
      - cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2 # Nodes eligible to serve as cluster manager
      - bootstrap.memory_lock=true # Disable JVM heap memory swapping
      - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" # Set min and max JVM heap sizes to at least 50% of system RAM
      - "DISABLE_INSTALL_DEMO_CONFIG=true" # Prevents execution of bundled demo script which installs demo certificates and security configurations to OpenSearch
      - "DISABLE_SECURITY_PLUGIN=true" # Disables Security plugin
    ulimits:
      memlock:
        soft: -1 # Set memlock to unlimited (no soft or hard limit)
        hard: -1
      nofile:
        soft: 65536 # Maximum number of open files for the opensearch user - set to at least 65536
        hard: 65536
    volumes:
      - opensearch-data1:/usr/share/opensearch/data # Creates volume called opensearch-data1 and mounts it to the container
      - /etc/localtime:/etc/localtime:ro
    ports:
      - 9200:9200 # REST API
      - 9600:9600 # Performance Analyzer
  opensearch-node2:
    image: opensearchproject/opensearch:2.13.0
    container_name: opensearch-node2
    environment:
      - cluster.name=opensearch-cluster # Name the cluster
      - node.name=opensearch-node2 # Name the node that will run in this container
      - discovery.seed_hosts=opensearch-node1,opensearch-node2 # Nodes to look for when discovering the cluster
      - cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2 # Nodes eligible to serve as cluster manager
      - bootstrap.memory_lock=true # Disable JVM heap memory swapping
      - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" # Set min and max JVM heap sizes to at least 50% of system RAM
      - "DISABLE_INSTALL_DEMO_CONFIG=true" # Prevents execution of bundled demo script which installs demo certificates and security configurations to OpenSearch
      - "DISABLE_SECURITY_PLUGIN=true" # Disables Security plugin
    ulimits:
      memlock:
        soft: -1 # Set memlock to unlimited (no soft or hard limit)
        hard: -1
      nofile:
        soft: 65536 # Maximum number of open files for the opensearch user - set to at least 65536
        hard: 65536
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - opensearch-data2:/usr/share/opensearch/data # Creates volume called opensearch-data2 and mounts it to the container
  opensearch-dashboards:
    image: opensearchproject/opensearch-dashboards:2.13.0
    container_name: opensearch-dashboards
    ports:
      - 5601:5601 # Map host port 5601 to container port 5601
    expose:
      - "5601" # Expose port 5601 for web access to OpenSearch Dashboards
    environment:
      - 'OPENSEARCH_HOSTS=["http://opensearch-node1:9200","http://opensearch-node2:9200"]'
      - "DISABLE_SECURITY_DASHBOARDS_PLUGIN=true" # disables security dashboards plugin in OpenSearch Dashboards
    volumes:
      - /etc/localtime:/etc/localtime:ro
  zookeeper:
    image: bitnami/zookeeper:3.9
    container_name: zookeeper
    restart: always
    ports:
      - "2181:2181"
    volumes:
      - "zookeeper_data:/bitnami"
      - /etc/localtime:/etc/localtime:ro
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes
  kafka:
    image: 'bitnami/kafka:3.4'
    container_name: kafka
    restart: always
    hostname: kafka
    ports:
      - '9092:9092'
      - '9094:9094'
    environment:
      - KAFKA_CFG_NODE_ID=0
      - KAFKA_CFG_PROCESS_ROLES=controller,broker
      - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://0.0.0.0:9094
      - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://119.45.147.122:9094
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT
      - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka:9093
      - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
      - ALLOW_PLAINTEXT_LISTENER=yes
      - "KAFKA_HEAP_OPTS=-Xmx512m -Xms512m"
    volumes:
      - kafka-conf:/bitnami/kafka/config
      - kafka-data:/bitnami/kafka/data
      - /etc/localtime:/etc/localtime:ro
  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:latest
    restart: always
    ports:
      - 8080:8080
    environment:
      DYNAMIC_CONFIG_ENABLED: 'true'
      KAFKA_CLUSTERS_0_NAME: kafka-dev
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
    volumes:
      - kafkaui-app:/etc/kafkaui
      - /etc/localtime:/etc/localtime:ro
  nacos:
    image: nacos/nacos-server:v2.3.1
    container_name: nacos
    ports:
      - 8848:8848
      - 9848:9848
    environment:
      - PREFER_HOST_MODE=hostname
      - MODE=standalone
      - JVM_XMX=512m
      - JVM_XMS=512m
      - SPRING_DATASOURCE_PLATFORM=mysql
      - MYSQL_SERVICE_HOST=nacos-mysql
      - MYSQL_SERVICE_DB_NAME=nacos_devtest
      - MYSQL_SERVICE_PORT=3306
      - MYSQL_SERVICE_USER=nacos
      - MYSQL_SERVICE_PASSWORD=nacos
      - MYSQL_SERVICE_DB_PARAM=characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true
      - NACOS_AUTH_IDENTITY_KEY=2222
      - NACOS_AUTH_IDENTITY_VALUE=2xxx
      - NACOS_AUTH_TOKEN=SecretKey012345678901234567890123456789012345678901234567890123456789
      - NACOS_AUTH_ENABLE=true
    volumes:
      - /app/nacos/standalone-logs/:/home/nacos/logs
      - /etc/localtime:/etc/localtime:ro
    depends_on:
      nacos-mysql:
        condition: service_healthy
  nacos-mysql:
    container_name: nacos-mysql
    build:
      context: .
      dockerfile_inline: |
        FROM mysql:8.0.31
        ADD https://raw.githubusercontent.com/alibaba/nacos/2.3.2/distribution/conf/mysql-schema.sql /docker-entrypoint-initdb.d/nacos-mysql.sql
        RUN chown -R mysql:mysql /docker-entrypoint-initdb.d/nacos-mysql.sql
        EXPOSE 3306
        CMD ["mysqld", "--character-set-server=utf8mb4", "--collation-server=utf8mb4_unicode_ci"]
    image: nacos/mysql:8.0.30
    environment:
      - MYSQL_ROOT_PASSWORD=root
      - MYSQL_DATABASE=nacos_devtest
      - MYSQL_USER=nacos
      - MYSQL_PASSWORD=nacos
      - LANG=C.UTF-8
    volumes:
      - nacos-mysqldata:/var/lib/mysql
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "13306:3306"
    healthcheck:
      test: [ "CMD", "mysqladmin", "ping", "-h", "localhost" ]
      interval: 5s
      timeout: 10s
      retries: 10
  prometheus:
    image: prom/prometheus:v2.52.0
    container_name: prometheus
    restart: always
    ports:
      - 9090:9090
    volumes:
      - prometheus-data:/prometheus
      - prometheus-conf:/etc/prometheus
      - /etc/localtime:/etc/localtime:ro
  grafana:
    image: grafana/grafana:10.4.2
    container_name: grafana
    restart: always
    ports:
      - 3000:3000
    volumes:
      - grafana-data:/var/lib/grafana
      - /etc/localtime:/etc/localtime:ro

volumes:
  redis-data:
  redis-conf:
  mysql-conf:
  mysql-data:
  rabbit-data:
  rabbit-app:
  opensearch-data1:
  opensearch-data2:
  nacos-mysqldata:
  zookeeper_data:
  kafka-conf:
  kafka-data:
  kafkaui-app:
  prometheus-data:
  prometheus-conf:
  grafana-data:

Start compose.yml

# Run this in the directory that contains the compose file
docker compose up -d
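
Once the stack is up, a few quick checks based on the ports published in the file above (adjust localhost to your server IP when testing remotely):

# Overall service status
docker compose ps
# OpenSearch REST API and Prometheus
curl http://localhost:9200
curl http://localhost:9090
# Web UIs: Kafka UI on 8080, OpenSearch Dashboards on 5601, RabbitMQ on 15672,
# Nacos on 8848 (path /nacos), Grafana on 3000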

Access and test

ZooKeeper GUI tool (PrettyZoo) download: https://github.com/vran-dev/PrettyZoo/releases/download/v2.1.1/prettyZoo-win.zip

