Notes on quickly starting a single-node Elasticsearch service with Docker

Precautions

  1. Run `df -h ${dir}` on the intended mount directory to check its disk capacity, and avoid placing the data on a small disk
  2. Run `lsof -i:${port}` to confirm the host ports are not already in use
  3. Grant read and write permissions on the mount directories (see the sketch below)
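
A minimal pre-flight sketch that bundles these checks; the path and ports are assumptions matching the steps below, so adjust them to your environment:

```
#!/bin/sh
# Pre-flight checks before starting Elasticsearch (paths/ports are examples)
ES_HOME=/home/aicc/docker/es    # mount root used in the steps below

# 1. Disk capacity of the filesystem backing the mount directory
df -h /home/aicc/docker/

# 2. Host ports 9200 (HTTP) and 9300 (transport) must be free;
#    lsof prints nothing and exits non-zero when a port is unused
for port in 9200 9300; do
  lsof -i:"$port" && echo "port $port is already in use" && exit 1
done

# 3. The mount directories must exist and be readable/writable
ls -ld "$ES_HOME/data" "$ES_HOME/config" "$ES_HOME/plugins" "$ES_HOME/logs"
```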

Steps

    cd /home/aicc/docker/
    mkdir es
    cd es
    mkdir data config plugins logs
    chmod -R 777 /home/aicc/docker/es/
    cd config/
    vim elasticsearch.yml
    vim startEs.sh
    chmod 700 startEs.sh
    ./startEs.sh
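
After `./startEs.sh` completes, a quick smoke test (a sketch assuming the default ports configured below) confirms the node is responding:

```
# The node needs a few seconds to boot before it answers
curl http://localhost:9200                  # node and cluster info as JSON
curl http://localhost:9200/_cat/health?v    # one-line cluster health summary
```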

elasticsearch.yml

```
# Elasticsearch configuration file
# Runtime settings for this node

# IP address the HTTP interface binds to
# 0.0.0.0 binds all network interfaces, so Elasticsearch is reachable from any address
http.host: 0.0.0.0

# Enable Cross-Origin Resource Sharing (CORS)
http.cors.enabled: true
# Allow cross-origin requests from any origin
# Note: in production, restrict this to specific domains for better security
http.cors.allow-origin: "*"

# Log level
# Sets the log level of the org.elasticsearch package to DEBUG
logger.org.elasticsearch: DEBUG

# Network bind address
# 0.0.0.0 binds all network interfaces; if port 9300 is unreachable, check this setting first
network.host: 0.0.0.0

# Cluster name
# All nodes must share the same cluster name to join the same cluster
cluster.name: aicc-znwh-cluster

# Node name
# Each node should have a unique name
node.name: node1

# HTTP port
# By default Elasticsearch serves its REST API on port 9200
http.port: 9200

# Transport layer TCP port
# Used for internal communication between nodes
transport.tcp.port: 9300
```
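
To check the CORS settings above from the host once the node is up, a request carrying an Origin header should come back with an `Access-Control-Allow-Origin` response header; the origin below is just a placeholder:

```
# -i prints response headers; look for Access-Control-Allow-Origin
curl -i -H "Origin: http://example.com" http://localhost:9200
```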

startEs.sh

```
#!/bin/sh
# Run the Elasticsearch container.
#   --name es-6.8.8.0              container name
#   --restart=always               always restart the container automatically
#   -p 9200:9200                   map host port 9200 to the container's HTTP port
#   -p 9300:9300                   map host port 9300 to the container's transport port
#   discovery.type=single-node     single-node discovery mode
#   ES_JAVA_OPTS                   JVM min/max heap of 4 GB; keep the heap well below the
#                                  container memory limit to leave room for off-heap usage
#   I18N_LOCALE                    locale set to zh-CN
#   --log-opt max-size/max-file    cap each container log file at 100 MB, keep at most 5
#   --memory / --memory-swap       limit container memory to 6 GB; equal values disable swap
#   -v ...                         mount the custom elasticsearch.yml and log4j2.properties,
#                                  plus the data, plugins, and logs directories
#   -d                             run the container in the background
docker run \
  --name es-6.8.8.0 \
  --restart=always \
  -p 9200:9200 \
  -p 9300:9300 \
  -e "discovery.type=single-node" \
  -e ES_JAVA_OPTS="-Xms4g -Xmx4g" \
  -e I18N_LOCALE="zh-CN" \
  --log-opt max-size=100m \
  --log-opt max-file=5 \
  --memory="6g" \
  --memory-swap="6g" \
  -v /home/aicc/docker/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
  -v /home/aicc/docker/es/config/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties \
  -v /home/aicc/docker/es/data:/usr/share/elasticsearch/data \
  -v /home/aicc/docker/es/plugins:/usr/share/elasticsearch/plugins \
  -v /home/aicc/docker/es/logs:/usr/share/elasticsearch/logs \
  -d \
  docker.elastic.co/elasticsearch/elasticsearch:6.8.8
```
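
If the node does not come up, the container state and the Elasticsearch startup logs are the first things to check:

```
docker ps --filter name=es-6.8.8.0      # the container should show as Up
docker logs -f --tail 100 es-6.8.8.0    # follow the Elasticsearch startup output
```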

Limiting container memory

For a container that is already running, apply the limit with the following command:

    docker update --memory="2g" --memory-swap="2g" 5b536106a059 a41d65d50110
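
To confirm the new limits took effect (the container IDs above are examples from the author's host), `docker inspect` and `docker stats` can be used:

```
# Memory limit in bytes as recorded by Docker (2g = 2147483648)
docker inspect -f '{{.HostConfig.Memory}}' 5b536106a059
# Live usage against the limit
docker stats --no-stream 5b536106a059
```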

log4j2.properties (adjust as needed)

```
status = error

# log action execution errors for easier debugging
logger.action.name = org.elasticsearch.action
logger.action.level = debug

appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n

appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 128MB
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.fileIndex = nomax
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB

rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.rolling.ref = rolling

appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4

logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = warn
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.additivity = false

appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
appender.index_search_slowlog_rolling.layout.type = PatternLayout
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1
appender.index_search_slowlog_rolling.policies.time.modulate = true

logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
logger.index_search_slowlog_rolling.additivity = false

appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
appender.index_indexing_slowlog_rolling.policies.time.modulate = true

logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
logger.index_indexing_slowlog.additivity = false


appender.audit_rolling.type = RollingFile
appender.audit_rolling.name = audit_rolling
appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.log
appender.audit_rolling.layout.type = PatternLayout
appender.audit_rolling.layout.pattern = {\
                "@timestamp":"%d{ISO8601}"\
                %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
                %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
                %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
                %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
                %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
                %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
                %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
                %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
                %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
                %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
                %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
                %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
                %varsNotEmpty{, "user.roles":%map{user.roles}}\
                %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
                %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
                %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
                %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
                %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
                %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
                %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
                %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
                %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
                %varsNotEmpty{, "indices":%map{indices}}\
                %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
                %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
                %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
                %varsNotEmpty{, "event.category":"%enc{%map{event.category}}{JSON}"}\
                }%n
# "node.name" node name from the `elasticsearch.yml` settings
# "node.id" node id which should not change between cluster restarts
# "host.name" unresolved hostname of the local node
# "host.ip" the local bound ip (i.e. the ip listening for connections)
# "event.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal)
# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc.
# "user.name" the subject name as authenticated by a realm
# "user.run_by.name" the original authenticated subject name that is impersonating another one.
# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as.
# "user.realm" the name of the realm that authenticated "user.name"
# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name")
# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from
# "user.roles" the roles array of the user; these are the roles that are granting privileges
# "origin.type" it is "rest" if the event is originating (is in relation to) a REST request; possible other values are "transport" and "ip_filter"
# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node
# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated
# "url.path" the URI component between the port and the query string; it is percent (URL) encoded
# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded
# "request.body" the content of the request body entity, JSON escaped
# "request.id" a synthentic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request
# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal)
# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal)
# "indices" the array of indices that the "action" is acting upon
# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event
# "rule" name of the applied rulee if the "origin.type" is "ip_filter"
# "event.category" fixed value "elasticsearch-audit"

appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}.log
appender.audit_rolling.policies.type = Policies
appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.audit_rolling.policies.time.interval = 1
appender.audit_rolling.policies.time.modulate = true

appender.deprecated_audit_rolling.type = RollingFile
appender.deprecated_audit_rolling.name = deprecated_audit_rolling
appender.deprecated_audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_access.log
appender.deprecated_audit_rolling.layout.type = PatternLayout
appender.deprecated_audit_rolling.layout.pattern = [%d{ISO8601}] %m%n
appender.deprecated_audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_access-%d{yyyy-MM-dd}.log
appender.deprecated_audit_rolling.policies.type = Policies
appender.deprecated_audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.deprecated_audit_rolling.policies.time.interval = 1
appender.deprecated_audit_rolling.policies.time.modulate = true

logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
logger.xpack_security_audit_logfile.level = info
logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
logger.xpack_security_audit_logfile.additivity = false

logger.xpack_security_audit_deprecated_logfile.name = org.elasticsearch.xpack.security.audit.logfile.DeprecatedLoggingAuditTrail
# set this to "off" instead of "info" to disable the deprecated appender
# in the 6.x releases both the new and the previous appenders are enabled
# for the logfile auditing
logger.xpack_security_audit_deprecated_logfile.level = info
logger.xpack_security_audit_deprecated_logfile.appenderRef.deprecated_audit_rolling.ref = deprecated_audit_rolling
logger.xpack_security_audit_deprecated_logfile.additivity = false

logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
logger.xmlsig.level = error
logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
logger.samlxml_decrypt.level = fatal
logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
logger.saml2_decrypt.level = fatal
```
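
Logger levels can also be adjusted at runtime through the cluster settings API instead of editing this file; a sketch, assuming the node from the steps above:

```
# Transient setting: raises org.elasticsearch to DEBUG until the node restarts
curl -X PUT "http://localhost:9200/_cluster/settings" \
  -H 'Content-Type: application/json' \
  -d '{"transient": {"logger.org.elasticsearch": "DEBUG"}}'
```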
