Docker Notes
- Registry mirrors (daemon.json)
/etc/docker/daemon.json
{
  "registry-mirrors": ["http://hub-mirror.c.163.com"],
  "insecure-registries": ["172.21.32.128:5000"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "50m", "max-file": "1" },
  "bip": "10.172.0.1/16",
  "default-address-pools": [{ "base": "10.192.0.1/16", "size": 24 }]
}
Alternative configuration pointing at internal registries:
{
  "registry-mirrors": ["http://172.21.27.31:6000"],
  "insecure-registries": ["172.21.32.31:5000", "172.21.65.101:5000"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "50m", "max-file": "1" },
  "bip": "10.172.0.1/16",
  "default-address-pools": [{ "base": "10.192.0.1/16", "size": 24 }]
}
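A quick sanity check after editing the file, as a sketch assuming python3 is available on the host: validate the JSON, restart the daemon, then confirm the mirrors were applied.
python3 -m json.tool /etc/docker/daemon.json
systemctl restart docker
docker info | grep -A 2 'Registry Mirrors'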
- Remove containers and images
docker rm -f $(docker ps -aq)
docker rmi -f $(docker images -aq)
Remove volumes
docker volume rm -f $(docker volume ls -q)
- Open port 2375 (Docker API)
/usr/lib/systemd/system/docker.service
#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock
- Restart Docker
systemctl enable docker
systemctl daemon-reload
systemctl restart docker
systemctl status docker
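Once the daemon is listening on 2375, the Engine API can be probed directly; a minimal check from the host itself (adjust the address when checking remotely):
curl http://127.0.0.1:2375/version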
- Push a Docker plugin to a private registry
docker plugin install --alias 172.21.32.31:5000/rexray/rbd rexray/rbd RBD_DEFAULTPOOL=rbd LINUX_VOLUME_FILEMODE=0777
docker plugin push 172.21.32.31:5000/rexray/rbd
- Prune unused Docker objects (containers, images, volumes, networks)
docker container prune
docker image prune
docker volume prune
docker network prune
or
docker system prune -a -f
Remove dangling (<none>) images
docker images|grep none|awk '{print $3}'|xargs docker rmi
Remove images matching a name (e.g. nginx-swarm)
docker images|grep nginx-swarm|awk '{printf ("%s:%s\n",$1,$2)}'|xargs docker rmi
Garbage-collect the private registry (the registry binary runs inside the registry container)
registry garbage-collect /etc/docker/registry/config.yml
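From the Docker host, the call would look roughly like this; the container name registry is an assumption, and --dry-run previews what would be deleted before committing:
docker exec registry bin/registry garbage-collect --dry-run /etc/docker/registry/config.yml
docker exec registry bin/registry garbage-collect /etc/docker/registry/config.yml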
- Deploy a Docker stack
docker stack deploy --compose-file kafka-stack2.yml kz
or
cat kafka-stack2.yml | docker stack deploy --compose-file - kz
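To watch the stack converge after deployment (kz is the stack name used above):
docker stack services kz
docker stack ps kz --no-trunc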
- Save and load Docker images
docker commit -m "redmine bak" 7d6bdfdfed0f redmine:3.4.13
# 7d6bdfdfed0f is the running container ID; redmine:3.4.13 is the resulting image tag
docker save -o redmine-v3.4.13.tar redmine:3.4.13
cd /home
docker load -i redmine-v3.4.13.tar
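When moving an image between hosts, save/load can also be streamed over ssh, skipping the intermediate tar file; a sketch, assuming ssh access to a hypothetical target-host:
docker save redmine:3.4.13 | ssh root@target-host 'docker load'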
- Show each container's main process alongside its stats
echo -e "`docker stats --no-stream -a | awk 'NR==1'`\tPID\t[%CPU %MEM]";\
for cid in `docker ps -q`; do \
pid=`docker top $cid | awk 'NR>1 {print $2}' | head -n 1`;\
ctx=`docker stats --no-stream -a | grep $cid`;\
tcx=`top -b -n 1 | grep $pid | awk '{print $10,$11}'`;\
echo -e "$ctx\t$pid\t[$tcx]"; \
done;
- Performance monitoring
# One-shot snapshot
docker stats --no-stream -a
# Live view
docker stats -a
- List tagged (non-dangling) images
docker images|grep -v '<none>'|awk '{if (NR>1){printf ("%s:%s\n",$1,$2)}}'
- Initialize a Swarm cluster
docker swarm init --advertise-addr 172.21.32.104
Output:
root@dbs-server03:/opt# docker swarm init --advertise-addr 172.21.32.104
Swarm initialized: current node (4h38yngku0qzdrw36jlp9zb9o) is now a manager.
To add a worker to this swarm, run the following command:
docker swarm join --token SWMTKN-1-1icuh1xck1oy4eyk6ccluioz3vsa92cgskmw3xmsfn6dngdwr5-cosx52x5iiq8sack10u5t0tuf 172.21.32.104:2377
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
root@dbs-server03:/opt# docker swarm join-token manager
To add a manager to this swarm, run the following command:
docker swarm join --token SWMTKN-1-1icuh1xck1oy4eyk6ccluioz3vsa92cgskmw3xmsfn6dngdwr5-3fmpoymtt9lff9u7vs1rffytl 172.21.32.104:2377
docker swarm update --cert-expiry 8760h0m0s
docker swarm update --dispatcher-heartbeat 1m
# Address pools for container IP allocation
docker swarm init --default-addr-pool 10.0.0.0/16 --default-addr-pool 192.168.1.0/24
# Run on a manager, --force-new-cluster makes that node the manager of a single-node cluster that can still manage and run services.
# It retains all previous service and task state; worker nodes remain part of the cluster and services keep running.
# Manager nodes must then be added (or re-added) to restore the previous task distribution and to keep enough managers for high availability and quorum.
docker swarm init --force-new-cluster --advertise-addr ens6
# Full example with multiple options
docker swarm init --force-new-cluster --advertise-addr ens6 --cert-expiry 8760h0m0s --dispatcher-heartbeat 1m --default-addr-pool 10.20.0.0/16 --default-addr-pool 10.30.0.0/16 --default-addr-pool-mask-length 24
- Join nodes to the swarm
docker swarm join --token SWMTKN-1-4yeg5wxc3hf1l9l6sg605528qg3dnpv4djf3uyxyzsv7m1rjw8-0zcvyqdzg4547jq9cd8ug16vh 172.21.32.104:2377
Output:
root@dbs-server10:/opt# docker swarm join --token SWMTKN-1-4yeg5wxc3hf1l9l6sg605528qg3dnpv4djf3uyxyzsv7m1rjw8-0zcvyqdzg4547jq9cd8ug16vh 172.21.32.104:2377
This node joined a swarm as a worker.
root@dbs-server03:/opt# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
tqvsmkqbhrbk3n3pvwzmdadfr * dbs-server03 Ready Active Leader 18.09.7
izz77qa1bth8ujbnmvrokprrz dbs-server04 Ready Active 18.09.7
do8o4lrw3qaolglz0rrkmg1gj dbs-server10 Ready Active 18.09.7
# Node info
docker info
>> Fields to check:
Is Manager: true
ClusterID: qkrcllksq2b8jlz8jclrfceng
Managers: 1
Nodes: 3
# Change a node's role
docker node update dbs-server04 --role worker
# Roles: manager | worker
docker node update --availability active dbs-server03
# Availability: active | drain | pause
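For example, draining a node relocates its tasks before maintenance, and setting it back to active lets it accept tasks again:
docker node update --availability drain dbs-server04
docker node update --availability active dbs-server04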
docker swarm leave --force
# Run on the leader:
docker node demote dbs-server03
docker node rm dbs-server03
Error: Error response from daemon: context deadline exceeded. Fix:
rm -rf /var/lib/docker/swarm/worker/tasks.db
systemctl restart docker
docker swarm leave --force
# Rejoin the cluster afterwards
Error: network sandbox join failed: subnet sandbox join failed for "10.0.1.0/24": error creating vxlan inte… Fix:
ip -d link show | grep vx
ip link delete vx-001001-i24o5
# Rejoin the cluster afterwards
- Node labels
docker node update --label-add hostname=`hostname -s` `hostname -s`
# Inspect labels
docker node inspect `hostname -s`
Using labels with docker service: --constraint 'node.labels.hostname == dbs-server128'
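A minimal sketch of pinning a service to a labeled node (the service name and nginx image are placeholders):
docker service create --name web-pinned --constraint 'node.labels.hostname == dbs-server128' nginx:latest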
- Deploy Portainer
A. Create the /opt/portainer directory and create the service
rm -rf /opt/portainer && mkdir -p /opt/portainer
docker service rm portainer
docker service create \
--name portainer \
--publish 9000:9000 \
--replicas=1 \
--mount type=bind,src=//var/run/docker.sock,dst=/var/run/docker.sock \
--mount type=bind,src=//opt/portainer,dst=/data \
--container-label com.docker.stack.namespace=swarm \
--label com.docker.stack.namespace=swarm \
portainer/portainer-ce:2.15.1 \
-H unix:///var/run/docker.sock
# Update the image
docker service update --image portainer/portainer-ce:2.15.1 portainer
Note: for the error invalid mount config for type "bind": bind source path does not exist: //opt/portainer, the fix is:
1) Restart Docker: systemctl restart docker
2) Check swarm status: docker node ls
3) If the node status stays Down, apply the fix for "Error response from daemon: context deadline exceeded" above
B. Open http://172.21.32.104:9000 and set the admin username and password
C. Configure endpoints and their groups
# Add every node
Environment type: Docker; Endpoint URL: x.x.x.x:2375; Name: x.x.x.x
1) Problem: when redeploying all services as new ones, orchestrator port conflicts can occur (many Swarm manager requests in a short time leave some services without traffic, producing 404 errors after deployment)
Fix:
docker service update --force <service-name>
http://172.21.32.182:2375/services?filters={%22label%22:[%22com.docker.stack.namespace=cps%22]}
docker network rm docker_gwbridge
- Prepare Ceph directories
rm -rf /etc/ceph
rm -rf /var/lib/ceph
rm -rf /opt/ceph/rbd
rm -rf /var/log/ceph
mkdir -p /etc/ceph /var/lib/ceph /var/lib/ceph/osd /opt/ceph/rbd /var/log/ceph
chown -R 167:167 /etc/ceph
chown -R 167:167 /var/lib/ceph
chown -R root:root /etc/ceph/ceph.conf
mkfs.xfs /dev/sdb -f
mount /dev/sdb /opt/ceph/rbd
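To make the mount survive reboots it can be recorded in /etc/fstab; a sketch, assuming /dev/sdb keeps its device name (a filesystem UUID is more robust):
echo '/dev/sdb /opt/ceph/rbd xfs defaults 0 0' >> /etc/fstab
mount -a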
- Ceph prerequisites
apt-get install ceph-deploy
apt-get install ntp
apt-get install libncurses5-dev gcc make git exuberant-ctags bc libssl-dev
service --status-all
Configure the APT source on Ubuntu
curl -fsSL 'http://download.ceph.com/keys/release.gpg' | apt-key add -
echo deb https://download.ceph.com/debian-octopus/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
- Ceph services (containerized)
docker run -d \
--name mon \
--restart=always \
--mount type=bind,src=//etc/ceph,dst=/etc/ceph \
--mount type=bind,src=//var/lib/ceph,dst=/var/lib/ceph \
--env MON_IP=172.21.32.110 \
--env DEBUG=stayalive \
--env CEPH_PUBLIC_NETWORK=172.21.32.0/24 \
--env CEPH_MON_ALLOW_POOL_DELETE=true \
--network=host \
--label com.docker.stack.namespace=dbs \
ceph/daemon:latest-devel mon
docker run -d \
--name mgr \
--restart=always \
--mount type=bind,src=//etc/ceph,dst=/etc/ceph \
--mount type=bind,src=//var/lib/ceph,dst=/var/lib/ceph \
--network=host \
--label com.docker.stack.namespace=dbs \
ceph/daemon:latest-devel mgr
docker run -d \
--name osd \
--privileged=true \
--restart=always \
--mount type=bind,src=//etc/ceph,dst=/etc/ceph \
--mount type=bind,src=//var/lib/ceph,dst=/var/lib/ceph \
--mount type=bind,src=//dev,dst=/dev \
--mount type=bind,src=//opt/ceph/rbd,dst=/var/lib/ceph/osd \
--network=host \
--label com.docker.stack.namespace=dbs \
ceph/daemon:latest-devel osd
docker run -d \
--name mds \
--restart=always \
--mount type=bind,src=//etc/ceph,dst=/etc/ceph \
--mount type=bind,src=//var/lib/ceph,dst=/var/lib/ceph \
--network=host \
--label com.docker.stack.namespace=dbs \
ceph/daemon:latest-devel mds
docker run -d \
--name rgw \
--restart=always \
--mount type=bind,src=//etc/ceph,dst=/etc/ceph \
--mount type=bind,src=//var/lib/ceph,dst=/var/lib/ceph \
--network=host \
--label com.docker.stack.namespace=dbs \
ceph/daemon:latest-devel rgw
Run auth bootstrap on the mon node
docker exec mon ceph auth get client.bootstrap-osd -o /var/lib/ceph/bootstrap-osd/ceph.keyring
docker exec mon ceph auth get client.bootstrap-mds -o /var/lib/ceph/bootstrap-mds/ceph.keyring
docker exec mon ceph auth get client.bootstrap-rgw -o /var/lib/ceph/bootstrap-rgw/ceph.keyring
docker exec mon ceph auth get client.bootstrap-rbd-mirror -o /var/lib/ceph/bootstrap-rbd-mirror/ceph.keyring
docker restart osd
On the OSD node, set OSD_DEVICE=/dev/sdb for the first container creation; once the OSD is prepared, remove the container and recreate it without this variable.
Enable the dashboard UI on the mgr node:
docker exec mgr ceph mgr module enable rbd_support
docker exec mgr ceph mgr module enable dashboard
docker exec mgr ceph dashboard create-self-signed-cert
docker exec mgr ceph dashboard set-login-credentials ceph ceph
docker exec mgr ceph config set mgr mgr/dashboard/server_addr 172.21.32.110
docker exec mgr ceph config set mgr mgr/dashboard/server_port 7000
docker exec mgr ceph config set mgr mgr/dashboard/ssl false
docker restart mgr
docker exec mgr ceph mgr services
RGW configuration
docker exec mgr ceph config set mgr mgr/dashboard/ssl_server_port 8000
docker exec mgr ceph dashboard ac-user-create admin admin administrator
docker exec mgr radosgw-admin user create --uid=admin --display-name=admin --system
docker exec mgr radosgw-admin user info --uid=admin
docker exec mgr ceph dashboard set-rgw-api-access-key Y03ODG54PMX1T837AZMX
docker exec mgr ceph dashboard set-rgw-api-secret-key NVs54S0BpotljD0xzPtBZk3sAE5Yvg80VwCdULIJ
docker exec mgr ceph dashboard set-rgw-api-host 172.21.32.110
docker exec mgr ceph dashboard set-rgw-api-port 24313
docker exec mgr ceph dashboard set-rgw-api-scheme http
docker exec mgr ceph dashboard set-rgw-api-admin-resource /api/sysinfo/hostname
docker exec mgr ceph dashboard set-rgw-api-user-id admin
docker exec mgr ceph dashboard set-rgw-api-ssl-verify false
docker exec mgr ceph dashboard set-rest-requests-timeout 600
docker exec mgr ceph dashboard set-iscsi-api-ssl-verification false
docker exec mgr ceph dashboard iscsi-gateway-list
# This command is known to be problematic
docker exec mgr ceph dashboard iscsi-gateway-add http://admin:[email protected]:24313
docker exec mgr ceph dashboard iscsi-gateway-rm ceph
# Fix RGW cluster warnings
ceph osd pool set .rgw.root size 1
ceph osd pool set .rgw.root pg_num 128
ceph osd pool set .rgw.root pgp_num 128
ceph osd pool set default.rgw.control size 1
ceph osd pool set default.rgw.control pg_num 128
ceph osd pool set default.rgw.control pgp_num 128
ceph osd pool set default.rgw.log size 1
ceph osd pool set default.rgw.log pg_num 128
ceph osd pool set default.rgw.log pgp_num 128
ceph osd pool set default.rgw.meta size 1
ceph osd pool set default.rgw.meta pg_num 128
ceph osd pool set default.rgw.meta pgp_num 128
# To allow pool deletion, add to /etc/ceph/ceph.conf:
mon allow pool delete = true
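The same option can also be injected into the running mon without editing the file, matching the containerized mon above:
docker exec mon ceph tell mon.* injectargs '--mon-allow-pool-delete=true'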
- Install and uninstall Rex-Ray
rexray uninstall
rm -rf $(find / -name rexray)
rm -rf $(find / -name libstorage)
curl -sSL https://rexray.io/install | sh -s -- stable 0.9.2
- Configuration (/etc/rexray/config.yml)
Config reference / generator: https://rexrayconfig.cfapps.io/
rexray:
  logLevel: info
libstorage:
  logging:
    level: info
    httpRequests: true
    httpResponses: true
  service: rbd
  integration:
    volume:
      operations:
        create:
          default:
            size: 20
        mount:
          preempt: true
        unmount:
          ignoreUsedCount: true
rbd:
  defaultPool: rbd
- Restart rexray / check status
systemctl enable rexray;
systemctl restart rexray;
systemctl status rexray;
- Install the rexray Ceph plugin
docker plugin disable rexray/rbd
docker plugin rm rexray/rbd
docker plugin install rexray/rbd RBD_DEFAULTPOOL=rbd LINUX_VOLUME_FILEMODE=0777
docker plugin ls
docker plugin inspect rexray/rbd
docker plugin install rexray/s3fs --grant-all-permissions --disable --alias 172.21.32.31:5000/rexray/s3fs:latest
docker plugin push 172.21.32.31:5000/rexray/s3fs:latest
docker plugin install rexray/rbd RBD_DEFAULTPOOL=rbd LINUX_VOLUME_FILEMODE=0777 --grant-all-permissions --disable --alias 172.21.32.31:5000/rexray/rbd:latest
docker plugin push 172.21.32.31:5000/rexray/rbd:latest
- Create the rbd pool in Ceph
Create it from the dashboard with the application tags cephfs, rbd, and rgw, using replicated mode. mon allow pool delete = true can be set in ceph.conf.
- Create a volume via rexray
rexray volume ls
docker volume create --driver=rexray/rbd:latest --name=test
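To confirm the volume materialized as an RBD image, checks along these lines should work (the rbd pool matches defaultPool above; the rbd CLI being present in the mon container is an assumption):
docker volume inspect test
docker exec mon rbd ls rbd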
- Example usage:
version: '3.7'
services:
  mysql:
    image: 172.21.32.31:5000/mysql:5.7.22
    environment:
      - MYSQL_ROOT_PASSWORD=MysqlPwd1@
    volumes:
      - data4:/var/lib/mysql
    ports:
      - 3306:3306
    deploy:
      replicas: 2
volumes:
  data4:
    driver: rexray/rbd:latest
or
docker service create \
--name mysql \
--publish 3306:3306 \
--replicas=1 \
--env MYSQL_ROOT_PASSWORD=SqlPwd1@ \
--constraint 'node.role == manager' \
--mount type=volume,src=mysql_data,dst=/var/lib/mysql,volume-driver=rexray/rbd:latest \
--label com.docker.stack.namespace=dbs \
--container-label com.docker.stack.namespace=dbs \
172.21.32.31:5000/mysql:5.7.22
- Ceph troubleshooting
docker: Error response from daemon: VolumeDriver.Mount: docker-legacy: Mount: data2: failed: unable to map rbd.
Fix (load the rbd kernel module):
lsmod |grep rbd
modprobe rbd
lsmod |grep rbd
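To load the module automatically at boot (standard systemd modules-load convention):
echo rbd > /etc/modules-load.d/rbd.conf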
Manual installation of rexray v0.9.2:
# Ubuntu
dpkg -i rexray_0.9.2-1_amd64.deb
# CentOS
rpm -U --quiet rexray-0.9.2-1.x86_64.rpm > /dev/null
- Dockerfile healthchecks
# Add a healthcheck (default interval is 30s)
HEALTHCHECK --interval=60s --timeout=10s --retries=5 CMD /usr/bin/mysqladmin -uroot -p"$MYSQL_ROOT_PASSWORD" ping | grep alive || exit 1
FROM openjdk:8-jre
# Add a healthcheck (default every 30 secs)
HEALTHCHECK --interval=60s --timeout=10s --retries=5 CMD curl -f http://localhost:8631/swagger-ui.html || exit 1
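To check whether a healthcheck is passing on a running container (the container name is a placeholder):
docker inspect --format '{{.State.Health.Status}}' <container>
docker ps --filter health=unhealthy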
- docker login error: Error saving credentials: error storing credentials - err: exit status 1, out:
Fix:
apt install gnupg2 pass
- Deploy Nexus3
version: '3.8'
services:
  nexus3:
    image: sonatype/nexus3:latest
    environment:
      - NEXUS_CONTEXT=/
    volumes:
      - /opt/deploy/nexus:/nexus-data
      - /etc/localtime:/etc/localtime
      - /etc/timezone:/etc/timezone
    ports:
      - 8000:8081
    deploy:
      replicas: 1
Note: grant ownership of the mapped directory first: chown -R 200 /opt/deploy/nexus
Default credentials: admin / 92c971cb-9f4c-4242-84ee-564b876924e4 (the initial password is written to /opt/deploy/nexus/admin.password)
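If the generated password is needed again later, it can be read back from the data volume; a sketch, assuming the nexus3 service name above:
docker exec $(docker ps -q -f name=nexus3) cat /nexus-data/admin.password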