hj_Server Operations 2023-05-11

yum list installed | grep podman    # check whether podman is already installed
yum install podman -y               # install podman; verify with: podman --version

[root@VM-12-9-opencloudos hj_files]# cat /etc/redhat-release
OpenCloudOS release 8.6.2205 (Core)   # check the operating system version

date    # check the current date and time

cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime   # resets the timezone to UTC+8 (Asia/Shanghai)

yum install docker
systemctl restart docker
systemctl status docker.service

docker cp /etc/localtime <container-id>:/etc/localtime   # fixes a timezone mismatch inside a container

tar command format (tar only archives files; it does not compress them):

 # archive files
 tar -cvf archive.tar files-or-paths...
 # extract an archive
 tar -xvf archive.tar

gzip command format (generally used together with tar archives):

# compress
 tar -zcvf archive.tar.gz files-or-paths...
 # decompress
 tar -zxvf archive.tar.gz
 # decompress to a given directory
 tar -zxvf archive.tar.gz -C target-path

Notes:

1. A tar archive compressed with gzip conventionally uses the extension xxx.tar.gz

2. tar's -z option invokes gzip, so compressing and decompressing can be done in a single step

Note: tar's f option must come last; the other options may appear in any order

https://blog.csdn.net/ken2232/article/details/131326086

# rabbitmq
podman pull rabbitmq
podman run -d \
    -p 15672:15672  -p  5672:5672 \
    -v /hj_files/mq/rabbitmq:/var/lib/rabbitmq \
    -e RABBITMQ_DEFAULT_USER=hj \
    -e RABBITMQ_DEFAULT_PASS=123456 \
    --hostname hjrabbitmq \
    --name hjrabbitmq \
    --restart=always \
    9b55f8c9f0dd
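# Quick sanity check (a sketch; container name and ports follow the run command above):
podman ps --filter name=hjrabbitmq             # should show the container as Up
podman logs hjrabbitmq 2>&1 | tail -n 5        # look for the "Server startup complete" banner
# management UI: http://<host>:15672 , login hj / 123456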
rabbitmq
# rocketmq — labeled 4.4.0 in these notes, though the image pulled below is 4.9.4
# pull the image
podman pull apache/rocketmq:4.9.4
# create host directories for the external mounts
mkdir /hj_files/mq/rocketmq/nameserver/logs -p
mkdir /hj_files/mq/rocketmq/broker/conf -p    # create and edit the broker.conf file in this directory
mkdir /hj_files/mq/rocketmq/broker/logs -p
mkdir /hj_files/mq/rocketmq/broker/store -p

# run the nameserver
podman run -d \
--restart=always \
--name hjmqnamesrv \
--privileged=true \
-p 9876:9876 \
-v /hj_files/mq/rocketmq/nameserver/logs:/root/logs \
-e "MAX_POSSIBLE_HEAP=1000000" \
a2a50ca263c3 \
sh mqnamesrv

# run the broker
podman run -d  \
--restart=always \
--name hjmqbroker \
--privileged=true \
-p 10911:10911 \
-p 10909:10909 \
-v  /hj_files/mq/rocketmq/broker/logs:/root/logs \
-v  /hj_files/mq/rocketmq/broker/store:/root/store \
-v /hj_files/mq/rocketmq/broker/conf/broker.conf:/opt/rocketmq-4.4.0/conf/broker.conf \
-e "MAX_POSSIBLE_HEAP=200000000" \
a2a50ca263c3 \
sh mqbroker -c /opt/rocketmq-4.4.0/conf/broker.conf
# Tried this many times; not sure why the files never show up in the mounted host directories.
# Mounting the /home/rocketmq/... paths instead doesn't work either; leaving this to investigate later.
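# Two things worth checking later (assumptions, not verified here):
# 1) the image pulled above is 4.9.4, but broker.conf is mounted to /opt/rocketmq-4.4.0/...,
#    so that path may simply not exist in this image;
# 2) on SELinux-enforcing hosts, podman bind mounts need relabeling — appending :z to each -v
#    (e.g. -v /hj_files/mq/rocketmq/broker/logs:/root/logs:z) may help.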

# run the web console
podman pull apacherocketmq/rocketmq-console:2.0.0
podman run -d \
--name hjmqconsole \
-p 9898:8080 \
-e "JAVA_OPTS=-Drocketmq.namesrv.addr=101.35.260.105:9876 \
-Dcom.rocketmq.sendMessageWithVIPChannel=false" \
-t 0667dbb1308e
rocketmq4.4.0
# mysql5.7.36
podman pull mysql:5.7.36
podman run -d -p 3306:3306 --name hj_mysql -e MYSQL_ROOT_PASSWORD=123456 c20987f18b13
mkdir -p /etc/hj_mysql_5.7.36_3307/conf
mkdir -p /etc/hj_mysql_5.7.36_3307/data
podman cp 9fe0b673dab5:/etc/mysql/.  /etc/hj_mysql_5.7.36_3307/conf   # then edit mysqld.cnf under /conf/mysql.conf.d
# stop the container, then re-run it:
podman run -d --privileged=true \
--name hj_mysql_5.7.36_3307 -p 3307:3307 \
-v /etc/hj_mysql_5.7.36_3307/data:/var/lib/mysql \
-v /etc/hj_mysql_5.7.36_3307/conf:/etc/mysql \
-e MYSQL_ROOT_PASSWORD=123456 c20987f18b13
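# Verify from the host (a sketch; assumes a mysql client is installed on the host —
# the mounted config below switches the server to port 3307):
mysql -h 127.0.0.1 -P 3307 -uroot -p123456 -e "select version();"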
mysql5.7.36
# redis
podman pull redis:6.2.11
podman run -d --privileged=true \
-p 6380:6380 \
-v /hj_files/hj_redis6.2.11/conf/redis.conf:/etc/redis/redis.conf \
-v /hj_files/hj_redis6.2.11/data/:/data \
 --name hj_redis6.2.11 \
 55ee3f86a3bb \
 redis-server /etc/redis/redis.conf
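# Quick check (a sketch; the port and password come from the mounted redis.conf below):
podman exec -it hj_redis6.2.11 redis-cli -p 6380 -a 123456 ping   # expect PONG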
redis6.2.11
################################## NETWORK #####################################
   # This is the externally mounted conf from the docker redis install; kept here for future reference...
# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
# JUST COMMENT THE FOLLOWING LINE.
# bind 127.0.0.1
# By default protected mode is enabled. You should disable it only if
# you are sure you want clients from other hosts to connect to Redis
# even if no authentication is configured, nor a specific set of interfaces
# are explicitly listed using the "bind" directive.
#
protected-mode no


# Accept connections on the specified port, default is 6379 (IANA #815344).
# If port 0 is specified Redis will not listen on a TCP socket.
#
port 6380


# TCP listen() backlog.
#
# In high requests-per-second environments you need an high backlog in order
# to avoid slow clients connections issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
#
tcp-backlog 511


# Close the connection after a client is idle for N seconds (0 to disable)
#
timeout 0


# A reasonable value for this option is 300 seconds, which is the new
# Redis default starting with Redis 3.2.1.
#
tcp-keepalive 300


################################# GENERAL #####################################


# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
#
daemonize no


# If you run Redis from upstart or systemd, Redis can interact with your
# supervision tree. Options:
# supervised no - no supervision interaction
# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
# supervised auto - detect upstart or systemd method based on
# UPSTART_JOB or NOTIFY_SOCKET environment variables
# Note: these supervision methods only signal "process is ready."
# They do not enable continuous liveness pings back to your supervisor.
#
supervised no


# If a pid file is specified, Redis writes it where specified at startup
# and removes it at exit.
#
# When the server runs non daemonized, no pid file is created if none is
# specified in the configuration. When the server is daemonized, the pid file
# is used even if not specified, defaulting to "/var/run/redis.pid".
#
# Creating a pid file is best effort: if Redis is not able to create it
# nothing bad happens, the server will start and run normally.
#
pidfile /var/run/redis_6380.pid


# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
#
loglevel notice


# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
#
logfile ""


# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
# Setting this to 0 fails at startup with: argument must be between 1 and 2147483647 inclusive
#
databases 16


# However it is possible to force the pre-4.0 behavior and always show a
# ASCII art logo in startup logs by setting the following option to yes.
#
always-show-logo yes


################################ SNAPSHOTTING ################################

#
# Save the DB on disk:
#
# These lines configure when Redis triggers RDB persistence, i.e. when the in-memory
# data set is saved to disk.
# "save m n" means: trigger a bgsave automatically if the data set has at least n
# modifications within m seconds.
# If you only use Redis as a cache and don't need persistence, use:
# save ""
#
save 900 1
save 300 10
save 60 10000


# Default is yes: if RDB is enabled and the most recent background save failed, Redis
# stops accepting writes. This makes users aware that data is not being persisted to
# disk correctly; otherwise nobody would notice the disaster. After Redis restarts
# (or bgsave succeeds again), writes are accepted again.
#
stop-writes-on-bgsave-error yes
# Consider setting this to no; otherwise the application may hit this error when bgsave fails:
# MISCONF Redis is configured to save RDB snapshots,
# but it is currently not able to persist on disk. 
# Commands that may modify the data set are disabled, 
# because this instance is configured to report errors 
# during writes if RDB snapshotting fails 
# (stop-writes-on-bgsave-error option). 
# Please check the Redis logs for details about the RDB error.

# Default is yes: compress string objects in the on-disk snapshot using the LZF
# algorithm. If you don't want to spend CPU on compression, set this to no, but the
# snapshot on disk will be larger.
#
rdbcompression yes


# Default is yes: after saving a snapshot, Redis appends a CRC64 checksum for data
# integrity. This costs roughly 10% performance; disable it for maximum performance.
#
rdbchecksum yes


# Snapshot file name; default is dump.rdb
#
dbfilename dump.rdb


# Directory where snapshot files are stored. This must be a directory, not a file name; by default it is the same directory as this config file.
#
dir ./


################################# REPLICATION #################################

# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes


# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
#
slave-read-only yes


# With slow disks and fast (large bandwidth) networks, diskless replication
# works better.
#
repl-diskless-sync no


# The delay is specified in seconds, and by default is 5 seconds. To disable
# it entirely just set it to 0 seconds and the transfer will start ASAP.
#
repl-diskless-sync-delay 5


# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
#
repl-disable-tcp-nodelay no


# By default the priority is 100.
#
slave-priority 100


################################## SECURITY ###################################

# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
requirepass 123456


############################# LAZY FREEING ####################################


# In all the above cases the default is to delete objects in a blocking way,
# like if DEL was called. However you can configure each case specifically
# in order to instead release memory in a non-blocking way like if UNLINK
# was called, using the following configuration directives:
#
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
slave-lazy-flush no


############################## APPEND ONLY MODE ###############################


# Default is no, i.e. Redis persists via RDB by default.
# To enable AOF persistence, set appendonly to yes.
#
appendonly no

# AOF file name; default is "appendonly.aof"
#
appendfilename "appendonly.aof"


# AOF fsync policy:
#
#      no: never call fsync; the OS decides when data reaches disk. Fastest, least safe.
#
#      always: fsync after every write, guaranteeing data is on disk. Very slow.
#
#      everysec: fsync once per second; at most 1s of data can be lost. The usual
#      choice, balancing safety and throughput.
#
appendfsync everysec


# During an AOF rewrite (or while writing an RDB file) there is heavy I/O, and under
# the everysec/always policies the fsync calls can block for a long time.
# no-appendfsync-on-rewrite defaults to no, the safer choice for durability.
# Set it to yes for latency-sensitive applications: during a rewrite, new writes are
# not fsynced and stay buffered in memory until the rewrite finishes. In that case
# Linux's default flush policy applies (roughly every 30 seconds), so up to 30 seconds
# of data may be lost.
#
no-appendfsync-on-rewrite no


# Default is 100: automatic AOF rewrite. When the current AOF file grows beyond this
# percentage of its size after the previous rewrite, Redis triggers bgrewriteaof to
# rewrite the log. At 100, a rewrite starts once the AOF doubles in size.
#
auto-aof-rewrite-percentage 100


# Minimum AOF size before a rewrite is allowed, so the percentage trigger doesn't fire while the file is still tiny.
#
auto-aof-rewrite-min-size 64mb


# The AOF file may end up truncated at the tail. When Redis starts, the AOF data is
# loaded into memory. Truncation can happen if the host OS crashes, especially on
# ext4 mounted without data=ordered (it does not happen when Redis itself crashes or
# is killed). Redis can then either refuse to start, or load as much data as possible.
# With yes, a truncated AOF is loaded and a log message notifies the user.
# With no, the user must first repair the file with redis-check-aof.
# Default is yes.
#
aof-load-truncated yes


# This is currently turned off by default in order to avoid the surprise
# of a format change, but will at some point be used as the default.
#
aof-use-rdb-preamble no


# Set it to 0 or a negative value for unlimited execution without warnings.
#
lua-time-limit 5000


################################## SLOW LOG ###################################


# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
#
slowlog-log-slower-than 10000


# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
#
slowlog-max-len 128


################################ LATENCY MONITOR ##############################


# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact, that while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
#
latency-monitor-threshold 0


############################# EVENT NOTIFICATION ##############################

# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
#
notify-keyspace-events ""


############################### ADVANCED CONFIG ###############################


# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
#
hash-max-ziplist-entries 512
hash-max-ziplist-value 64


# Positive numbers mean store up to _exactly_ that number of elements
# per list node.
# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
# but if your use case is unique, adjust the settings as necessary.
#
list-max-ziplist-size -2


# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
# etc.
#
list-compress-depth 0


# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
#
set-max-intset-entries 512


# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
#
zset-max-ziplist-entries 128
zset-max-ziplist-value 64


# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
#
hll-sparse-max-bytes 3000


# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
#
activerehashing yes


# Both the hard or the soft limit can be disabled by setting them to zero.
#
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60


# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
#
hz 10


# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
#
aof-rewrite-incremental-fsync yes
redis.conf example
# Copyright (c) 2014, 2021, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation.  The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA

#
# The MySQL  Server configuration file.
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html

[mysql]
# default character set for the mysql client
default-character-set=utf8
[mysqld]
# server-side character set (the default is the 8-bit latin1)
character-set-server=utf8
# use port 3307 (the default is 3306)
port=3307
pid-file=/var/run/mysqld/mysqld.pid
socket=/var/run/mysqld/mysqld.sock
datadir=/var/lib/mysql
# maximum number of connections
max_connections=20
# default storage engine for newly created tables
default-storage-engine=INNODB
#log-error    = /var/log/mysql/error.log
# By default we only accept connections from localhost
#bind-address    = 127.0.0.1
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
mysqld.cnf example
# name server address (acts like a registry)
namesrvAddr=101.35.260.105:9876
# IP this broker listens on (master)
brokerIP1 = 101.35.260.105
# broker cluster name
brokerClusterName = DefaultCluster
# broker node name
brokerName = broker-a
# broker node id
brokerId = 0
# hour of day when expired files are deleted (4 AM)
deleteWhen = 04
# file retention time (72 hours)
fileReservedTime = 72
# broker role
brokerRole = ASYNC_MASTER
# disk flush type: sync or async
flushDiskType = ASYNC_FLUSH
# whether the broker may auto-create topics; recommended on for dev, off in production
autoCreateTopicEnable = true
# whether the broker may auto-create subscription groups; recommended on for dev, off in production
autoCreateSubscriptionGroup = true
# maximum disk usage ratio; writes fail once usage exceeds it (95%)
diskMaxUsedSpaceRatio=95
broker.conf example
# base image: java
FROM docker.io/library/openjdk:17
# author
MAINTAINER hj
# VOLUME mount point /server/tmp, created inside the container
VOLUME /server/tmp
# add the jar to the image, renaming it to hj.jar
ADD hj-server-0.0.1-SNAPSHOT.jar hj.jar
# timezone
RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
RUN echo 'Asia/Shanghai' >/etc/timezone
# expose the port
EXPOSE 8888
# the ADD above already copied the jar in; this touch merely bumps the file's
# access/modification time to now — optional
RUN bash -c 'touch /hj.jar'
# run the jar
ENTRYPOINT ["java","-jar","/hj.jar"]


# build and run
# podman build -f Dockerfile -t hj0510    (with docker, a trailing '.' build-context argument is required)
# podman run -d --privileged=true --name hj0510 -p 8888:8888 92c05d5dfd3c
Dockerfile example for running a jar
app_name='hjserver'
app_port='8888'

# stop the running container
echo '......stop container  hjserver......'
podman stop ${app_name}

# remove the container
echo '......rm container  hjserver......'
podman rm ${app_name}

# remove the image named app_name
echo '......rmi none images  hjserver......'
podman rmi `podman images | grep ${app_name} | awk '{print $3}'`

# build the image

podman build -f Dockerfile  -t ${app_name}

# recreate and run the container
# echo '......start container hjapp......'
# podman run -p ${app_port}:${app_port} -d  --name ${app_name} ${app_name}
# this variant mounts the log files to the host:
podman run -d \
--name ${app_name} -p ${app_port}:${app_port} \
--restart=always \
--privileged=true \
-v /etc/localtime:/etc/localtime \
-v /hj_files/server/tmp:/server/tmp \
-v /hj_files/server/logs:/logs \
${app_name}
# container recreated and running
echo '......Success hjserver......'
hjServer.sh: one-click script to run the jar with docker/podman
echo '####### hj_boot database backup starting ######'
echo 'hj_boot backup started...'$(date "+%Y-%m-%d %H:%M:%S")
# keep 10 days of backups
DATE=$(date +%Y%m%d)
# database user
USERNAME=root
# password
PASSWORD=123456
# databases to back up
DB1=hj_test
DB2=hj_boot
# hj_mysql57 is the mysql container's name
podman exec -i  hj_mysql57 bash<<'EOF'
# create the directory if it does not already exist
if [ ! -d "/backups/mysql" ]; then
  mkdir -p /backups/mysql
fi
# hj_test is the database name
# $-substitution does not work here: $USERNAME, $PASSWORD, $DB1 etc. all error out,
# because the heredoc delimiter is quoted ('EOF'), so host-side variables are not expanded
/usr/bin/mysqldump -uroot -p123456 hj_test | gzip > /backups/mysql/hj_test_$(date +%Y%m%d).sql.gz
/usr/bin/mysqldump -uroot -p123456 hj_boot | gzip > /backups/mysql/hj_boot_$(date +%Y%m%d).sql.gz
# keep only 3 days of backups inside the container
rm -f /backups/mysql/hj_test_$(date -d -3day +%Y%m%d).sql.gz
rm -f /backups/mysql/hj_boot_$(date -d -3day +%Y%m%d).sql.gz
exit
EOF
echo '####### back out of the container ######'
# create the directory if it does not already exist
backup_dir=/hj_files/sh/sql
if [ ! -d $backup_dir ]; then
  mkdir -p $backup_dir
fi
# copy the backups from the container to the host
# (using $DB1 in place of hj_test did not work here)
podman cp hj_mysql57:/backups/mysql/hj_test_$DATE.sql.gz $backup_dir/
podman cp hj_mysql57:/backups/mysql/hj_boot_$DATE.sql.gz $backup_dir/
# delete backups older than 10 days
# note: ${DB1} must be braced — $DB1_... would be parsed as the (unset) variable DB1_
rm -f $backup_dir/${DB1}_$(date -d -10day +%Y%m%d).sql.gz
rm -f $backup_dir/${DB2}_$(date -d -10day +%Y%m%d).sql.gz
echo '####### backup finished ######'


# add to crontab
# crontab -l
# crontab -e
# run the backup at 3 AM every Friday
# fields: minute hour day-of-month month day-of-week; * = any, / = step (0/3), - = range (3-6), , = list (1,28)
# 0 3 * * 5 /hj_files/sh/hjSqlBackupSql.sh
# run the backup at 2 AM every day
# 0 2 * * * /hj_files/sh/hjSqlBackupSql.sh

# check the cron service status
# systemctl status crond.service
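
# Restore sketch (the date in the file name is hypothetical; adjust database and file):
# gunzip < /hj_files/sh/sql/hj_boot_20230511.sql.gz | podman exec -i hj_mysql57 mysql -uroot -p123456 hj_boot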
mysql backup script
# nacos-server:2.2.2
podman pull nacos/nacos-server:v2.2.2
podman run -p 8848:8848 --name nacostest -d 0514f8ffee17
# create a directory for the external mounts
mkdir -p /hj_files/nacos
podman cp 73ea411a7d23:/home/nacos/logs/ /hj_files/nacos/
podman cp 73ea411a7d23:/home/nacos/conf/ /hj_files/nacos/
# nacos/conf contains a mysql-schema.sql file;
# create a nacos database in MySQL and run that sql file against it
# remove the test container (podman stop / podman rm)
# edit the config file (application.properties), mainly the mysql connection settings:
db.url.0=jdbc:mysql://101.35.260.230:3307/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
db.user.0=root
db.password.0=123456

podman run -d \
--env MODE=standalone \
-e TIME_ZONE='Asia/Shanghai' \
-v /hj_files/nacos/logs:/home/nacos/logs \
-v /hj_files/nacos/conf:/home/nacos/conf \
-p 8848:8848 \
-p 9848:9848 \
-p 9849:9849 \
--name hj_nacos \
--restart=always \
0514f8ffee17


nacos.core.auth.enabled=true
nacos.core.auth.plugin.nacos.token.secret.key=${NACOS_AUTH_TOKEN:SecretKey012345678901234567890123456789012345678901234567890123456789}
nacos.core.auth.server.identity.key=${NACOS_AUTH_IDENTITY_KEY:serverIdentity}
nacos.core.auth.server.identity.value=${NACOS_AUTH_IDENTITY_VALUE:security}


# spring
server.servlet.contextPath=${SERVER_SERVLET_CONTEXTPATH:/nacos}
server.contextPath=/nacos
server.port=${NACOS_APPLICATION_PORT:8848}
server.tomcat.accesslog.max-days=30
server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i
server.tomcat.accesslog.enabled=${TOMCAT_ACCESSLOG_ENABLED:false}
server.error.include-message=ALWAYS
# default current work dir
server.tomcat.basedir=file:.
#*************** Config Module Related Configurations ***************#
### Deprecated configuration property, it is recommended to use `spring.sql.init.platform` replaced.
#spring.datasource.platform=${SPRING_DATASOURCE_PLATFORM:}
spring.datasource.platform=mysql
spring.sql.init.platform=${SPRING_DATASOURCE_PLATFORM:}
nacos.cmdb.dumpTaskInterval=3600
nacos.cmdb.eventTaskInterval=10
nacos.cmdb.labelTaskInterval=300
nacos.cmdb.loadDataAtStart=false
db.num=${MYSQL_DATABASE_NUM:1}
db.url.0=jdbc:mysql://101.33.250.220:3307/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
db.user.0=root
db.password.0=123456
nacos.core.auth.enabled=true
### The auth system to use, currently only 'nacos' and 'ldap' is supported:
nacos.core.auth.system.type=${NACOS_AUTH_SYSTEM_TYPE:nacos}
### worked when nacos.core.auth.system.type=nacos
### The token expiration in seconds:
nacos.core.auth.plugin.nacos.token.expire.seconds=${NACOS_AUTH_TOKEN_EXPIRE_SECONDS:18000}
### The default token:
nacos.core.auth.plugin.nacos.token.secret.key=${NACOS_AUTH_TOKEN:SecretKey012345678901234567890123456789012345678901234567890123456789}
### Turn on/off caching of auth information. By turning on this switch, the update of auth information would have a 15 seconds delay.
nacos.core.auth.caching.enabled=${NACOS_AUTH_CACHE_ENABLE:false}
nacos.core.auth.enable.userAgentAuthWhite=${NACOS_AUTH_USER_AGENT_AUTH_WHITE_ENABLE:false}
nacos.core.auth.server.identity.key=${NACOS_AUTH_IDENTITY_KEY:serverIdentity}
nacos.core.auth.server.identity.value=${NACOS_AUTH_IDENTITY_VALUE:security}
## spring security config
### turn off security
nacos.security.ignore.urls=${NACOS_SECURITY_IGNORE_URLS:/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-fe/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**}
# metrics for elastic search
management.metrics.export.elastic.enabled=false
management.metrics.export.influx.enabled=false
nacos.naming.distro.taskDispatchThreadCount=10
nacos.naming.distro.taskDispatchPeriod=200
nacos.naming.distro.batchSyncKeyCount=1000
nacos.naming.distro.initDataRatio=0.9
nacos.naming.distro.syncRetryDelay=5000
nacos.naming.data.warmup=true
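
# Console entry point (a sketch; with auth enabled, the default credentials are
# nacos / nacos unless they have been changed):
# http://<host>:8848/nacos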
nacos
# run nginx with podman, mounting external directories
podman pull nginx:1.25       # use the docker.io image
# trial run, then enter the container to locate the config and html files
podman run -d -p 8801:80  f9c14fe76d50
# copy the files out
podman cp 2688e9dd841f:/etc/nginx/. /hj_files/hj_nginx_1.25/conf/
podman cp 2688e9dd841f:/usr/share/nginx/html/. /hj_files/hj_nginx_1.25/html/
# stop and remove the container, then re-run; the config and html content can be prepared first
# afterwards, updating the html on the host is reflected inside the container immediately
# config changes need a podman restart to take effect
podman run -d --privileged=true \
--name hj_nginx_1.25_8801 -p 8801:80 \
-v /hj_files/hj_nginx_1.25/conf:/etc/nginx \
-v /hj_files/hj_nginx_1.25/html:/usr/share/nginx/html \
 f9c14fe76d50
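# Apply later config changes (a sketch; nginx -s reload avoids a full restart):
podman restart hj_nginx_1.25_8801
# or: podman exec hj_nginx_1.25_8801 nginx -s reload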
# done
# a few handy commands
lsof -i :80
kill -9 pid
netstat -anp
netstat -anp |grep 80
ps -ef | grep nginx
nginx
docker run -d --privileged=true \
--name singapore_nginx_1.25 -p 80:80 \
-v /home/lsy/nginx/conf:/etc/nginx \
-v /home/lsy/pcAdmin:/usr/share/nginx/html \
 eea7b3dcba7e
   location / {
       add_header 'Access-Control-Allow-Origin' '*';
       add_header 'Access-Control-Allow-Methods' 'POST,GET,OPTIONS';
       add_header 'Access-Control-Allow-Headers' 'Authorization';
       # root   /www/wwwroot/server/pc/pcAdmin;
       root   /usr/share/nginx/html;
       index  index.html index.htm;
   }
nginx_tip
 # minio
 podman pull docker.io/minio/minio
 podman run -d \
   -p 9000:9000 \
   -p 9001:9001 \
   --name minio \
   --restart=always \
   --privileged=true \
   -v /hj_files/minio/config:/root/.minio \
   -v /hj_files/minio/data:/data \
   -e "MINIO_ROOT_USER=minioHj" \
   -e "MINIO_ROOT_PASSWORD=miniokHj123456" \
   minio/minio server /data --console-address ":9001" --address ":9000"

  # Then open the console in a browser and set up buckets, access keys, etc. To make uploaded files directly reachable from a browser, set the bucket policy to public.
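 # Liveness check (a sketch; this is MinIO's standard health endpoint):
 # curl -I http://<host>:9000/minio/health/live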
minio
# emqx4.4.18
podman pull emqx/emqx-ee:4.4.18

podman run -d --name emqx-ee -p 1883:1883 -p 8081:8081 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx-ee:4.4.18

# going with the install below instead~~~
podman pull emqx/emqx:4.4.18
podman run -d --name hj-emqx \
  -v /etc/localtime:/etc/localtime \
  -p 1883:1883 \
  -p 8081:8081 \
  -p 8083:8083 \
  -p 8084:8084 \
  -p 8883:8883 \
  -p 18083:18083 \
 a1c15a6e87c4
 # default account: admin, password: public
 # console: http://101.33.250.220:18083/#/  — password changed to Huajian2018
 # pwd shows the current absolute path
 # enter the container and edit the /opt/emqx/etc/acl.conf file:
 {allow, {user, "dashboard"}, subscribe, ["$SYS/#"]}.

 {allow, {ipaddr, "127.0.0.1"}, pubsub, ["$SYS/#", "#"]}.

 {allow, all, subscribe, ["$SYS/brokers/+/clients/#", {eq, "#"}]}.   // 这个控制上下线提醒须放开.

 {deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.

 {allow, all}.

 # Note: after the service restarts, this data is reset (nothing is persisted here).
emqx4.4.18
1883    MQTT over TCP (tcp listener)
18083   web management dashboard
8081    default HTTP API listener (related to device provisioning)
8083    MQTT over WebSocket (ws listener)
8084    MQTT over WebSocket with SSL (wss listener)
8883    MQTT over TCP with SSL (ssl listener)

The listener ports can be changed in the etc/plugins/emqx_management.conf config file
[root@ip-10-0-13-1 ~]# docker exec -it 33508e3a7cd0 /bin/bash
bash-5.1$ cd /opt/emqx/
bash-5.1$ ls
bin            data           erts-12.3.2.2  etc            lib            log            releases
bash-5.1$ cd etc/plugins/
bash-5.1$ ls
acl.conf.paho          emqx_auth_ldap.conf    emqx_auth_mysql.conf   emqx_bridge_mqtt.conf  emqx_exhook.conf       emqx_lwm2m.conf        emqx_psk_file.conf     emqx_rule_engine.conf  emqx_stomp.conf
emqx_auth_http.conf    emqx_auth_mnesia.conf  emqx_auth_pgsql.conf   emqx_coap.conf         emqx_exproto.conf      emqx_management.conf   emqx_recon.conf        emqx_sasl.conf         emqx_telemetry.conf
emqx_auth_jwt.conf     emqx_auth_mongo.conf   emqx_auth_redis.conf   emqx_dashboard.conf    emqx_lua_hook.conf     emqx_prometheus.conf   emqx_retainer.conf     emqx_sn.conf           emqx_web_hook.conf
bash-5.1$ cat emqx_management.conf 
##--------------------------------------------------------------------
## EMQX Management Plugin
##--------------------------------------------------------------------

## Max Row Limit
management.max_row_limit = 10000

## Application default secret
##
## Value: String
## management.application.default_secret = public

## Default Application ID
##
## Value: String
management.default_application.id = admin

## Default Application Secret
##
## Value: String
management.default_application.secret = public

## Initialize apps file
## Is used to add administrative app/secrets when EMQX is launched for the first time.
## This config will not take any effect once EMQX database has one or more apps.
## The file content format is as below:
##  ```
##819e5db182cf:l9C5suZClIF3FvdzWqmINrVU61WNfIjcglxw9CVM7y1VI
##bb5a6cf1c06a:WuNRRgcRTGiNcuyrE49Bpwz4PGPrRnP4hUMi647kNSbN
## ```
# management.bootstrap_apps_file = etc/bootstrap_apps.txt

##--------------------------------------------------------------------
## HTTP Listener

management.listener.http = 8081
management.listener.http.acceptors = 2
management.listener.http.max_clients = 512
management.listener.http.backlog = 512
management.listener.http.send_timeout = 15s
management.listener.http.send_timeout_close = on
management.listener.http.inet6 = false
management.listener.http.ipv6_v6only = false

##--------------------------------------------------------------------
## HTTPS Listener

## management.listener.https = 8081
## management.listener.https.acceptors = 2
## management.listener.https.max_clients = 512
## management.listener.https.backlog = 512
## management.listener.https.send_timeout = 15s
## management.listener.https.send_timeout_close = on
## management.listener.https.certfile = etc/certs/cert.pem
## management.listener.https.keyfile = etc/certs/key.pem
## management.listener.https.key_password = yourpass
## management.listener.https.cacertfile = etc/certs/cacert.pem
## management.listener.https.verify = verify_peer
## NOTE: Do not use tlsv1.3 if emqx is running on OTP-22 or earlier
## management.listener.https.tls_versions = tlsv1.3,tlsv1.2,tlsv1.1,tlsv1
## management.listener.https.ciphers = TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256,TLS_CHACHA20_POLY1305_SHA256,TLS_AES_128_CCM_SHA256,TLS_AES_128_CCM_8_SHA256,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-DES-CBC3-SHA,ECDH-ECDSA-AES256-GCM-SHA384,ECDH-RSA-AES256-GCM-SHA384,ECDH-ECDSA-AES256-SHA384,ECDH-RSA-AES256-SHA384,DHE-DSS-AES256-GCM-SHA384,DHE-DSS-AES256-SHA256,AES256-GCM-SHA384,AES256-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDH-ECDSA-AES128-GCM-SHA256,ECDH-RSA-AES128-GCM-SHA256,ECDH-ECDSA-AES128-SHA256,ECDH-RSA-AES128-SHA256,DHE-DSS-AES128-GCM-SHA256,DHE-DSS-AES128-SHA256,AES128-GCM-SHA256,AES128-SHA256,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-DSS-AES256-SHA,ECDH-ECDSA-AES256-SHA,ECDH-RSA-AES256-SHA,AES256-SHA,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,DHE-DSS-AES128-SHA,ECDH-ECDSA-AES128-SHA,ECDH-RSA-AES128-SHA,AES128-SHA
## management.listener.https.fail_if_no_peer_cert = true
## management.listener.https.inet6 = false
## management.listener.https.ipv6_v6only = false
bash-5.1$ 
emqx configuration file


[root@VM-12-9-opencloudos ~]# podman exec -it b31e7cac52e2 /bin/bash

bash-4.2# mysql -uroot -p
Enter password: 

mysql> alter user 'root'@'localhost' IDENTIFIED BY 'hj20230808'
    -> ;
Query OK, 0 rows affected (0.00 sec)

mysql> alter user 'root'@'%' IDENTIFIED BY 'hj20230808';
Query OK, 0 rows affected (0.00 sec)
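# Note (general MySQL behavior, not specific to this container): ALTER USER takes
# effect immediately — no FLUSH PRIVILEGES needed. 'root'@'localhost' covers logins
# from inside the container; 'root'@'%' covers remote connections.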
tip: changing the docker mysql5.7 password
[root@VM-12-9-opencloudos ~]# podman exec -it 2be1ff4cd7ca bash
root@2be1ff4cd7ca:/data# cd /usr/local/bin/
root@2be1ff4cd7ca:/usr/local/bin# redis-cli -p 6380
127.0.0.1:6380> auth 666666
OK
127.0.0.1:6380> config get requirepass;
(empty array)
127.0.0.1:6380> config get requirepass
1) "requirepass"
2) "666666"
127.0.0.1:6380> config set requirepass hj20230808
OK
127.0.0.1:6380> 
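# Note: CONFIG SET requirepass only changes the running instance; to survive a restart,
# also update requirepass in the mounted redis.conf (or run CONFIG REWRITE, which works
# here because the server was started with a config file).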
tip: changing the docker redis password
root@7dbaa4dc9fca:/usr/local/bin# vi jenkins.sh 
bash: vi: command not found
root@7dbaa4dc9fca:/usr/local/bin# apt-get install vim
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
E: Unable to locate package vim
root@7dbaa4dc9fca:/usr/local/bin# apt-get update
Get:1 http://deb.debian.org/debian bullseye InRelease [116 kB]
Get:2 http://deb.debian.org/debian-security bullseye-security InRelease [48.4 kB]  
Get:3 http://deb.debian.org/debian bullseye-updates InRelease [44.1 kB]
Get:4 http://deb.debian.org/debian bullseye/main amd64 Packages [8183 kB]
Get:5 http://deb.debian.org/debian-security bullseye-security/main amd64 Packages [253 kB]    
Get:6 http://deb.debian.org/debian bullseye-updates/main amd64 Packages [17.3 kB]                  
Get:7 https://packagecloud.io/github/git-lfs/debian bullseye InRelease [25.8 kB]                         
Get:8 https://packagecloud.io/github/git-lfs/debian bullseye/main amd64 Packages [2128 B]
Fetched 8689 kB in 3s (3140 kB/s)  
Reading package lists... Done
root@7dbaa4dc9fca:/usr/local/bin# apt-get install vim
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
The following additional packages will be installed:
  libgpm2 vim-common vim-runtime xxd
...
tip: installing vim inside a docker container


[root@ip-10-0-13-1 ~]# docker pull easysoft/zentao:18.5

[root@ip-10-0-13-1 lsy]# mkdir -p /home/lsy/zentao/zentaopms
[root@ip-10-0-13-1 lsy]# mkdir -p /home/lsy/zentao/mysqldata

docker run -d --name lsy_zentao -p 8888:80 -v /home/lsy/zentao/zentaopms:/www/zentaopms -v /home/lsy/zentao/mysqldata:/var/lib/mysql 3756c794d8ad

# open the port, then access it in a browser
# the built-in mysql default password is 123456
zentao
 tip: some podman versions fail to start; mysql then needs -e MYSQL_INTERNAL=true (built-in mysql)

docker run -it \
    -v $PWD/data:/data \
    -e MYSQL_INTERNAL=false \
    -e ZT_MYSQL_HOST=<your MySQL host> \
    -e ZT_MYSQL_PORT=<your MySQL port> \
    -e ZT_MYSQL_USER=<your MySQL user> \
    -e ZT_MYSQL_PASSWORD=<your MySQL password> \
    -e ZT_MYSQL_DB=<zentao database name> \
    hub.zentao.net/app/zentao:latest       # this variant connects to an external mysql
zentao addendum


# pull the image
docker pull emqx/emqx:5.1.6 
# run
docker run -d --name hj-emqx \
  -v /etc/localtime:/etc/localtime \
  -p 1883:1883 \
  -p 8083:8083 \
  -p 8084:8084 \
  -p 8883:8883 \
  -p 18083:18083 \
 369cf6d7ddb0
# copy the files out
docker cp a0c8190ae9a0:/opt/emqx/etc /home/emqx/etc

# stop and remove the container
# re-run, mounting the etc directory
docker run -d --name hj-emqx \
  -v /etc/localtime:/etc/localtime \
  -v /home/emqx/etc:/opt/emqx/etc \
  -p 1883:1883 \
  -p 8083:8083 \
  -p 8084:8084 \
  -p 8883:8883 \
  -p 18083:18083 \
 369cf6d7ddb0

# edit acl.conf

{allow, {username, {re, "^dashboard$"}}, subscribe, ["$SYS/#"]}.

{allow, {ipaddr, "127.0.0.1"}, all, ["$SYS/#", "#"]}.

{allow, all, subscribe, ["$SYS/brokers/+/clients/#", {eq, "#"}]}.

{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.

{allow, all}.

# edit emqx.conf
node {
  name = "emqx@127.0.0.1"
  cookie = "emqxsecretcookie"
  data_dir = "data"
}

cluster {
  name = emqxcl
  discovery_strategy = manual
}

dashboard {
    listeners.http {
        bind = 18083
    }
}

listeners.wss.default {
    bind = "0.0.0.0:8084"
    max_connections = 512000
    websocket.mqtt_path = "/mqtt"
    ssl_options {
        keyfile = "etc/certs/hj.key"
        certfile = "etc/certs/hj_public.crt"
        cacertfile = "etc/certs/hj_chain.crt"
    }
}
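
# Note (an assumption based on the mounts above): the ssl_options paths are relative
# to /opt/emqx, so hj.key, hj_public.crt and hj_chain.crt must exist under
# /home/emqx/etc/certs/ on the host before restarting.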

# restart the container
docker restart 1ced9b81a16a

# tip: best to edit those two files with vi inside the container.
# the ssl cert files were downloaded from Aliyun (apache format)
emqx 5.1.6 with wss configured

# If the config files are edited before the first start, startup fails with a selfxxx error
# If /data, /log, etc. are additionally mounted, it fails with an openssl xxx error


# these are installed on the host machine
1. Uninstall the old jdk8
[root@VM-12-9-opencloudos ~]# java -version
java version "1.8.0_361"
Java(TM) SE Runtime Environment (build 1.8.0_361-b09)
Java HotSpot(TM) 64-Bit Server VM (build 25.361-b09, mixed mode)
[root@VM-12-9-opencloudos ~]# which java
/usr/local/jdk/jdk1.8/bin/java
[root@VM-12-9-opencloudos ~]# rm -rf /usr/local/jdk
[root@VM-12-9-opencloudos ~]# java -version
-bash: /usr/local/jdk/jdk1.8/bin/java: No such file or directory
[root@VM-12-9-opencloudos ~]# 
Also delete the corresponding entries in the config files and the extracted directories.
2. Install jdk17
tar -zxvf jdk-17_linux-x64_bin.tar.gz
 mkdir -p /usr/local/jdk
 mv /hj_files/jdk-17.0.7/ /usr/local/jdk/jdk17
vi /etc/profile
# partial excerpt below
unset -f pathmunge

if [ -n "${BASH_VERSION-}" ] ; then
        if [ -f /etc/bashrc ] ; then
                # Bash login shells run only /etc/profile
                # Bash non-login shells run only /etc/bashrc
                # Check for double sourcing is done in /etc/bashrc.
                . /etc/bashrc
       fi
fi
export JAVA_HOME=/usr/local/jdk/jdk17
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib:$CLASSPATH
export JAVA_PATH=${JAVA_HOME}/bin:${JRE_HOME}/bin
export PATH=$PATH:${JAVA_PATH}
# reload the configuration
source /etc/profile
java -version

# download page for historical versions
https://archive.apache.org/dist/maven/maven-3/3.8.6/binaries/
# extract, install, and configure environment variables
 tar -zxvf apache-maven-3.8.6-bin.tar.gz
mkdir -p /usr/local/maven
 mv /hj_files/apache-maven-3.8.6 /usr/local/maven/maven-3.8.6
vi /etc/profile
# partial excerpt below
export JAVA_HOME=/usr/local/jdk/jdk17
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib:$CLASSPATH
export JAVA_PATH=${JAVA_HOME}/bin:${JRE_HOME}/bin
export PATH=$PATH:${JAVA_PATH}
export MAVEN_HOME=/usr/local/maven/maven-3.8.6
export PATH=${PATH}:${MAVEN_HOME}/bin
# reload the configuration
source /etc/profile
 mvn -version
maven jdk17


Configure remote debugging

Launch the jar on the server with:
java -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=7075 -jar lsy-api-0.0.1-SNAPSHOT.jar --server.port=8008

A postman request to http://120.78.105.211:8008/api/lsypattern/page then lands in the local IDEA code~
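
# IDEA side (a sketch of the standard setup): add a "Remote JVM Debug" run
# configuration with host 120.78.105.211 and port 7075, and start it before
# sending the request.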


https://zh.globalpetrolprices.com/Belgium/

https://www.epexspot.com/en

https://www.check24.de/

https://ahjregistry.myorangebutton.com/#/APIDoc

https://www.carbonict.com/doc/23


systemctl stop docker
  945  yum list installed |grep docker
  947  yum -y remove containerd.x86_64   docker.x86_64  runc.x86_64
  948  yum list installed |grep docker
  949  rpm -qa |grep docker
  950  rm -rf /var/lib/docker
  952  yum -y install docker
  953  docker 
  954  docker ps
  955  systemctl start docker
  
  957  docker ps


docker  pull redis:6.2.11

docker  run -d --privileged=true \
-p 6380:6380 \
-v /etc/lsy_redis6.2.11/conf/redis.conf:/etc/redis/redis.conf \
-v /etc/lsy_redis6.2.11/data/:/data \
 --name  esy_redis6.2.11 \
 1c51c2b5aeb2 \
 redis-server /etc/redis/redis.conf 


docker pull nginx:1.25

docker run -d --privileged=true \
--name esy_nginx_1.25 -p 80:80 \
-v /home/lsy/nginx/conf:/etc/nginx \
-v /home/lsy/pcAdmin:/home/lsy/pcAdmin \
 e784f4560448


docker pull emqx/emqx:5.1.6 

docker run -d --name esy-emqx \
  -v /etc/localtime:/etc/localtime \
  -v /home/emqx/etc:/opt/emqx/etc \
  -p 1883:1883 \
  -p 8083:8083 \
  -p 8084:8084 \
  -p 8883:8883 \
  -p 18083:18083 \
 369cf6d7ddb0
docker: reinstall and re-run everything after files were accidentally deleted

