- On the Windows host, create a directory (e.g. D:\docker-log-system) to hold the configuration files and the mounted log directory. The final structure is shown below; a short shell sketch for creating it follows the tree.
```
docker-log-system/
├── filebeat/               # Filebeat configuration directory
│   └── filebeat.yml        # Filebeat core configuration
├── logs/                   # mount point for the Java application logs (put your local Java logs here)
└── docker-compose.yml      # Compose file that starts ES + Kibana + Filebeat in one command
```
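A minimal way to script this skeleton, assuming Git Bash or WSL on the Windows host (where the D: drive appears as /d; adjust the path to taste):

```bash
# create the folder skeleton and empty config files from the tree above
mkdir -p /d/docker-log-system/{filebeat,logs}
touch /d/docker-log-system/filebeat/filebeat.yml
touch /d/docker-log-system/docker-compose.yml
```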
- Create the `docker-compose.yml` file with the following content (a quick way to validate it follows the file):

```yaml
version: '3.8'
services:
  # Elasticsearch service
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.9
    container_name: es7
    environment:
      - "discovery.type=single-node"        # single-node mode (for testing)
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"    # cap the heap (Docker on Windows has limited memory by default; avoids lock-ups)
      - "xpack.security.enabled=false"      # disable security (for testing)
    ports:
      - "9200:9200"                         # ES HTTP port
      - "9300:9300"                         # ES transport port
    volumes:
      - es-data:/usr/share/elasticsearch/data   # persist the data
    networks:
      - log-network

  # Kibana service (visualizes the logs stored in ES)
  kibana:
    image: docker.elastic.co/kibana/kibana:7.17.9
    container_name: kib7
    environment:
      - "ELASTICSEARCH_HOSTS=http://elasticsearch:9200"   # connect to the ES container via its in-network hostname
    ports:
      - "5601:5601"                         # Kibana port
    depends_on:
      - elasticsearch                       # start ES before Kibana
    networks:
      - log-network

  # Filebeat service (ships the logs to ES)
  filebeat:
    image: docker.elastic.co/beats/filebeat:7.17.9
    container_name: fb7
    user: root                              # works around permission issues with Windows bind mounts
    volumes:
      # mount the Filebeat configuration file
      - ./filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
      # mount the local Windows log directory (the key step: D:\docker-log-system\logs maps to /logs in the container)
      - ./logs:/logs:ro
      # mount Filebeat's data directory (persists the registry so logs are not ingested twice)
      - filebeat-data:/usr/share/filebeat/data
    depends_on:
      - elasticsearch
    networks:
      - log-network
    # startup command that disables the config-file permission check (key change 2)
    command: ["filebeat", "-e", "-c", "/usr/share/filebeat/filebeat.yml", "--strict.perms=false"]

# custom network (lets the containers reach each other)
networks:
  log-network:
    driver: bridge

# named volumes (persist ES and Filebeat data)
volumes:
  es-data:
  filebeat-data:
```
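Before starting anything, you can let Compose parse and validate the file; `docker-compose config` prints the resolved configuration, or an error pointing at the offending line:

```bash
# validate the Compose file and print the resolved configuration
docker-compose config
```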
- Create the `filebeat/filebeat.yml` file with the following content (a short demo of the multiline pattern follows the file):

```yaml
# log collection (the core of this file)
filebeat.inputs:
  - type: log
    enabled: true
    # collect from /logs inside the container (maps to D:\docker-log-system\logs on Windows)
    paths:
      - /logs/*.log
    # merge multi-line Java log entries (stack traces) into a single event
    multiline.type: pattern
    multiline.pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'  # a line starting with a date begins a new event
    multiline.negate: true
    multiline.match: after

# output to Elasticsearch (in-network hostname; matches the ES service name in the Compose file)
output.elasticsearch:
  enabled: true
  hosts: ["elasticsearch:9200"]       # address of the ES container (container-to-container traffic)
  # create one index per day (easier to manage)
  index: "java-app-log-%{+yyyy.MM.dd}"

# point Filebeat at Kibana so the index pattern can be set up automatically
setup.kibana:
  host: "kibana:5601"
setup.template.enabled: true
setup.template.pattern: "java-app-log-*"
setup.template.name: "java-app-log"
setup.ilm.enabled: false              # disable index lifecycle management (for testing)
```
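To see what the multiline settings buy you: only lines that start with a date match the pattern and open a new event; the indented stack-trace lines in between get glued onto the preceding event. A quick illustration with made-up log lines (Git Bash/WSL):

```bash
# only the date-prefixed lines survive the grep, i.e. would start a new event
printf '%s\n' \
  '2024-05-01 10:00:00.123 ERROR --- [main] c.e.DemoService : boom' \
  'java.lang.IllegalStateException: boom' \
  '    at com.example.DemoService.run(DemoService.java:42)' \
  '2024-05-01 10:00:01.456 INFO  --- [main] c.e.DemoService : recovered' \
  | grep -E '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
```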
- Start the containers:

```bash
docker-compose up -d
```
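The first start pulls three images and can take a few minutes. Two quick checks that everything came up:

```bash
docker-compose ps                                     # all three containers should be Up
curl "http://localhost:9200/_cluster/health?pretty"   # status should be green or yellow
```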
- In your own Spring Boot service, add a `logback.xml` log configuration file (a smoke test for the whole pipeline follows the file):

```xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration debug="true">
    <!-- log directory; the default must point at the directory mounted into Filebeat (D:\docker-log-system\logs) -->
    <springProperty scope="context" name="logPath" source="logging.file.path" defaultValue="D:/docker-log-system/logs"/>

    <!-- console output -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <immediateFlush>true</immediateFlush>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %highlight(%-5level) --- [%15.15(%thread)] %cyan(%-40.40(%logger{40})) : %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- INFO file: everything except ERROR -->
    <appender name="info_log" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${logPath}/java-app-log-info.log</file>
        <append>true</append>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>DENY</onMatch>
            <onMismatch>ACCEPT</onMismatch>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${logPath}/java-app-log-info.%d.%i.log</fileNamePattern>
            <!-- roll each file at 10 MB; keep at most 30 days of history, capped at 10 GB in total
                 (older files are deleted even before 30 days if the cap is reached) -->
            <maxHistory>30</maxHistory>
            <totalSizeCap>10GB</totalSizeCap>
            <maxFileSize>10MB</maxFileSize>
        </rollingPolicy>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level --- [%15.15(%thread)] %-40.40(%logger{40}) : %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- ERROR file: ERROR and above only -->
    <appender name="error_log" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${logPath}/java-app-log-error.log</file>
        <append>true</append>
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>ERROR</level>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${logPath}/java-app-log-error.%d.%i.log</fileNamePattern>
            <!-- same rolling rules as the INFO file: 10 MB per file, 30 days / 10 GB total -->
            <maxHistory>30</maxHistory>
            <totalSizeCap>10GB</totalSizeCap>
            <maxFileSize>10MB</maxFileSize>
        </rollingPolicy>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level --- [%15.15(%thread)] %-40.40(%logger{40}) : %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="STDOUT"/>
        <appender-ref ref="info_log"/>
        <appender-ref ref="error_log"/>
    </root>
</configuration>
```
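You do not have to wait for real application traffic: the pipeline can be smoke-tested by appending a hand-written event, in the same layout the logback pattern produces, to the mounted logs directory. A sketch; the file name smoke-test.log and the exception are made up:

```bash
# run from D:/docker-log-system (i.e. /d/docker-log-system in Git Bash)
cat >> logs/smoke-test.log <<'EOF'
2024-05-01 10:00:00.123 ERROR --- [           main] com.example.DemoService : simulated failure
java.lang.IllegalStateException: simulated
    at com.example.DemoService.run(DemoService.java:42)
EOF

# give Filebeat a few seconds, then the daily index should appear
curl "http://localhost:9200/_cat/indices/java-app-log-*?v"
```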
- ES status: open http://localhost:9200/_cat/indices?v in the browser; the java-app-log-* indices should be listed.
- Kibana: open http://localhost:5601 and query the logs through the java-app-log-* index pattern.
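If you prefer the command line to the Kibana UI, the index pattern can also be created through Kibana's saved-objects API (a sketch, assuming Kibana 7.x; @timestamp is the event timestamp Filebeat writes):

```bash
curl -X POST "http://localhost:5601/api/saved_objects/index-pattern" \
  -H "kbn-xsrf: true" -H "Content-Type: application/json" \
  -d '{"attributes":{"title":"java-app-log-*","timeFieldName":"@timestamp"}}'
```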