JoyLau's Blog

JoyLau's technical learning and reflections

A walkthrough of aggregating API documentation with OpenAPI 3 + Spring Cloud Gateway

Component Selection

  1. SpringDoc
  2. Knife4j
  3. SpringCloud Gateway

Project Configuration

Add the SpringDoc dependency to every Spring Boot service:

<dependency>
    <groupId>org.springdoc</groupId>
    <artifactId>springdoc-openapi-ui</artifactId>
    <version>${springdoc.version}</version>
</dependency>

Add the WebFlux flavor of SpringDoc to the gateway project:

<dependency>
    <groupId>org.springdoc</groupId>
    <artifactId>springdoc-openapi-webflux-ui</artifactId>
    <version>${springdoc.version}</version>
</dependency>

You also need to exclude the springdoc-openapi-ui dependency: the gateway is a WebFlux application and must not pull in the servlet-based UI.
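
For instance, if a shared module drags springdoc-openapi-ui in transitively, the exclusion could look like this (the groupId/artifactId of the shared module below are placeholders):

<dependency>
    <!-- hypothetical shared module that transitively includes springdoc-openapi-ui -->
    <groupId>com.example</groupId>
    <artifactId>common-module</artifactId>
    <exclusions>
        <exclusion>
            <groupId>org.springdoc</groupId>
            <artifactId>springdoc-openapi-ui</artifactId>
        </exclusion>
    </exclusions>
</dependency>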

OpenAPI Configuration

@Configuration
@AllArgsConstructor
public class SwaggerConfiguration {
    private final Environment environment;

    @Bean
    public OpenAPI openAPI() {
        return new OpenAPI()
                .info(info());
    }

    private Info info() {
        return new Info()
                .title("xxxx")
                .description(environment.getProperty("spring.application.name") + " 服务 API 文档")
                .version("xx")
                .contact(new Contact().name("xxx").url("xxx").email("xxxxx"))
                .summary("OpenAPI 文档");
    }
}

Document Aggregation

To aggregate the Swagger docs, add a group for every service route discovered by the gateway:

@Component
@AllArgsConstructor
public class SwaggerConfig {
    public static final String MODULE_SUB_PREFIX = "ReactiveCompositeDiscoveryClient_";

    private final SwaggerUiConfigParameters swaggerUiConfigParameters;

    private final RouteLocator routeLocator;

    // Rebuild the group list from the discovered routes every 20 seconds
    @Scheduled(fixedDelay = 20000)
    public void apis() {
        swaggerUiConfigParameters.getUrls().clear();
        routeLocator.getRoutes().subscribe(routeDefinition -> {
            if (routeDefinition.getId().contains(MODULE_SUB_PREFIX)) {
                // Strip the discovery-client prefix to get the plain service name
                String name = routeDefinition.getId().substring(MODULE_SUB_PREFIX.length());
                swaggerUiConfigParameters.addGroup(name);
            }
        });
    }
}
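
One pitfall: the @Scheduled refresh above only runs if scheduling is enabled somewhere in the application. If your project does not enable it already, a minimal sketch:

@Configuration
@EnableScheduling
public class SchedulingConfig {
}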

Rewrite the /v3/api-docs/ response body to add a basePath property, so that Knife4j can send debug requests to the correct prefix from the aggregated UI:

@Component
public class AddBasePathFilterFactory extends AbstractGatewayFilterFactory<AddBasePathFilterFactory.Config> {

    private final ModifyResponseBodyGatewayFilterFactory modifyResponseBodyGatewayFilterFactory;

    public AddBasePathFilterFactory(ModifyResponseBodyGatewayFilterFactory modifyResponseBodyGatewayFilterFactory) {
        super(Config.class);
        this.modifyResponseBodyGatewayFilterFactory = modifyResponseBodyGatewayFilterFactory;
    }

    @Override
    public GatewayFilter apply(Config config) {
        // Delegate to ModifyResponseBody with a rewrite function that patches the JSON payload
        ModifyResponseBodyGatewayFilterFactory.Config cf = new ModifyResponseBodyGatewayFilterFactory.Config()
                .setRewriteFunction(JsonNode.class, JsonNode.class,
                        (e, jsonNode) -> Mono.justOrEmpty(addBasePath(e, jsonNode)));
        return modifyResponseBodyGatewayFilterFactory.apply(cf);
    }

    @Override
    public String name() {
        return "AddBasePath";
    }

    @Setter
    public static class Config {
    }

    private JsonNode addBasePath(ServerWebExchange exchange, JsonNode jsonNode) {
        if (jsonNode.isObject()) {
            ObjectNode node = (ObjectNode) jsonNode;
            // Derive the basePath (the /{group} prefix) from the request path
            String basePath = exchange.getRequest().getPath().subPath(4).value();
            node.put("basePath", basePath);
            return node;
        }
        return jsonNode;
    }
}

Gateway Route Configuration

spring:
  cloud:
    gateway:
      routes:
        # openapi: rewrite /v3/api-docs/{group} to /{group}/v3/api-docs, then add the basePath property
        - id: openapi
          uri: http://localhost:${server.port}
          predicates:
            - Path=/v3/api-docs/**
          filters:
            - RewritePath=/v3/api-docs/(?<path>.*), /$\{path}/v3/api-docs
            - AddBasePath
        # redirect the home page to the aggregated docs page
        - id: doc
          uri: http://localhost:${server.port}
          predicates:
            - Path=/
          filters:
            - RedirectTo=302, /doc.html
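
To trace a request through these routes (the service name user-service below is illustrative, and the gateway port depends on your setup): the aggregated UI asks for /v3/api-docs/user-service, RewritePath turns that into /user-service/v3/api-docs, the discovery route for user-service proxies it to that service's /v3/api-docs, and AddBasePath stamps the group prefix into the returned JSON:

curl http://localhost:8080/v3/api-docs/user-service   # OpenAPI JSON now contains "basePath": "/user-service"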

Overview

Notes on building an EFK (Elasticsearch 8 + Filebeat + Kibana) log analysis platform,
protected by username and password authentication.

Certificate Generation

Start a single ES node first, then run the following commands inside the node to generate the certificates:

if [ x${ELASTIC_PASSWORD} == x ]; then
  echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
  exit 1;
elif [ x${KIBANA_PASSWORD} == x ]; then
  echo "Set the KIBANA_PASSWORD environment variable in the .env file";
  exit 1;
fi;
if [ ! -f config/certs/ca.zip ]; then
  echo "Creating CA";
  bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
  unzip config/certs/ca.zip -d config/certs;
fi;
if [ ! -f config/certs/certs.zip ]; then
  echo "Creating certs";
  echo -ne \
  "instances:\n"\
  "  - name: es01\n"\
  "    dns:\n"\
  "      - es01\n"\
  "      - localhost\n"\
  "    ip:\n"\
  "      - 127.0.0.1\n"\
  "  - name: es02\n"\
  "    dns:\n"\
  "      - es02\n"\
  "      - localhost\n"\
  "    ip:\n"\
  "      - 127.0.0.1\n"\
  "  - name: es03\n"\
  "    dns:\n"\
  "      - es03\n"\
  "      - localhost\n"\
  "    ip:\n"\
  "      - 127.0.0.1\n"\
  > config/certs/instances.yml;
  bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
  unzip config/certs/certs.zip -d config/certs;
fi;
echo "Setting file permissions";
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";

See the official documentation for details.
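
In the official compose file this script runs as the command of a one-shot setup container that shares the certs volume with the ES nodes; a minimal sketch, adapted to the bind mounts used below (service name and paths mirror the official example):

setup:
  image: elasticsearch:${STACK_VERSION}
  volumes:
    - ./certs:/usr/share/elasticsearch/config/certs
  user: "0"
  command: >
    bash -c '
      # ... the script above goes here ...
    '
  healthcheck:
    test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
    interval: 1s
    timeout: 5s
    retries: 120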

Service Setup

docker-compose.yml

version: "2.2"
services:
es01:
image: elasticsearch:${STACK_VERSION}
volumes:
- ./certs:/usr/share/elasticsearch/config/certs
- ./es01-data:/usr/share/elasticsearch/data
container_name: elasticsearch-01
restart: always
ports:
- ${ES_PORT}:9200
networks:
- elastic
environment:
- node.name=es01
- cluster.name=${CLUSTER_NAME}
- discovery.type=single-node
#- cluster.initial_master_nodes=es01,es02,es03
#- discovery.seed_hosts=es02,es03
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- "ES_JAVA_OPTS=-Xms1g -Xmx1g"
- bootstrap.memory_lock=true
- xpack.security.enabled=true
#- xpack.security.http.ssl.enabled=true
#- xpack.security.http.ssl.key=certs/es01/es01.key
#- xpack.security.http.ssl.certificate=certs/es01/es01.crt
#- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
#- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es01/es01.key
- xpack.security.transport.ssl.certificate=certs/es01/es01.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt http://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
kibana:
depends_on:
es01:
condition: service_healthy
image: kibana:${STACK_VERSION}
container_name: kibana
restart: always
volumes:
- ./certs:/usr/share/kibana/config/certs
- ./kibanadata:/usr/share/kibana/data
ports:
- ${KIBANA_PORT}:5601
networks:
- elastic
environment:
- SERVERNAME=192.168.1.21
- SERVER_BASEPATH=/kibana
- SERVER_REWRITEBASEPATH=true
- ELASTICSEARCH_HOSTS=http://es01:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
- I18N_LOCALE=zh-CN
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601/kibana | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
filebeat:
depends_on:
es01:
condition: service_healthy
image: elastic/filebeat:${STACK_VERSION}
container_name: filebeat
ports:
- 6115:6115
restart: always
volumes:
- ./filebeat-data/filebeat.yml:/usr/share/filebeat/filebeat.yml
- ./filebeat-data/filebeat.template.json:/usr/share/filebeat/filebeat.template.json
networks:
- elastic
networks:
elastic:

.env file

# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD=xxxxx

# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD=Kaiyuan@2022

# Version of Elastic products
STACK_VERSION=8.4.0

# Set the cluster name
CLUSTER_NAME=docker-cluster

# Set to 'basic' or 'trial' to automatically start the 30-day trial
LICENSE=basic
#LICENSE=trial

# Port to expose Elasticsearch HTTP API to the host
ES_PORT=9200
#ES_PORT=127.0.0.1:9200

# Port to expose Kibana to the host
KIBANA_PORT=5601
#KIBANA_PORT=80

# Increase or decrease based on the available host memory (in bytes)
MEM_LIMIT=1073741824

# Project namespace (defaults to the current folder name if not set)
#COMPOSE_PROJECT_NAME=myproject
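
The compose file above mounts a filebeat.yml that isn't shown here; its contents depend on your log sources. A minimal sketch, assuming logs arrive over the TCP port 6115 that the filebeat service exposes and are shipped to es01 as the elastic user (both assumptions, not the post's actual config):

filebeat.inputs:
  - type: tcp
    host: "0.0.0.0:6115"

output.elasticsearch:
  hosts: ["http://es01:9200"]
  username: "elastic"
  password: "xxxxx"   # the ELASTIC_PASSWORD from .env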

Background

A note on applying for free licenses for Atlassian's Confluence and Jira.

Service Setup

docker-compose.yml

version: "3"
services:
confluence:
image: atlassian/confluence
container_name: confluence
restart: always
ports:
- 8090:8090
- 8091:8091
volumes:
- ./confluence-data:/var/atlassian/application-data/confluence
mysql:
image: mysql:8.0.22
container_name: mysql
security_opt:
- seccomp:unconfined
ports:
- 6101:3306
restart: always
volumes:
- ./mysql-data:/var/lib/mysql
- ./my.cnf:/etc/mysql/my.cnf
environment:
- MYSQL_ROOT_PASSWORD=Kaiyuan@2020
- TZ=Asia/Shanghai
jira:
image: atlassian/jira-software
container_name: jira
restart: always
ports:
- 8080:8080
volumes:
- ./jira-data:/var/atlassian/application-data/jira

my.cnf

[mysqld]
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
datadir = /var/lib/mysql
secure-file-priv= NULL

# Custom config should go here
!includedir /etc/mysql/conf.d/
max_connections=1024

sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'
transaction-isolation=READ-COMMITTED

License Application

Go to the Atlassian website.
Click New Trial License to request a new license.
Choose Confluence, then Confluence (Data Center).
Enter your Server ID to receive a free one-month license.
When the month is up, simply apply again.

Background

Sometimes we already run a self-hosted Jira site and then set up Confluence as well; rather than creating every user a second time, we can reuse Jira's users in Confluence.

Steps

Step 1: configure a user server in Jira

(screenshot: Jira user server configuration)

The IP address here is the address of the Confluence server; it effectively acts as a whitelist.

Step 2: configure Confluence

Configure the User Directories option in Confluence.

(screenshot: Confluence user directory configuration)

Click "Test and Save" to complete the user synchronization.

Background

Sometimes a single-node, MySQL-backed Nacos cannot serve requests after the server reboots: the nacos and MySQL services both start at boot, but MySQL comes up slowly, so Nacos cannot reach the database during startup and never recovers.
My solution here is to switch to the single-node Derby edition of Nacos.

Deployment

The docker-compose.yml looks like this:

version: "2"
services:
nacos:
image: nacos/nacos-server:v2.1.0
container_name: nacos
restart: always
environment:
- PREFER_HOST_MODE=ip
- MODE=standalone
- NACOS_AUTH_ENABLE=true
volumes:
- ./standalone-logs/:/home/nacos/logs
- ./data:/home/nacos/data
ports:
- "8848:8848"

Create the data and standalone-logs directories in the current directory and start the service, as sketched below.
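
A minimal sketch of those two commands:

mkdir -p data standalone-logs
docker-compose up -d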

Background

Sometimes a server needs to start a few services after a power failure and reboot. Besides the usual rc mechanisms, I used to rely on systemctl services; today I found that crontab's @reboot tag also does the job.

Usage

crontab -e

# start the canal adapter 120 seconds after boot
@reboot sleep 120; cd /data/msmp-service/canal/canal.adapter-1.1.5/bin && sh restart.sh

@reboot sleep 600; cd /data/gateway && sh handler.sh restart

The entries take effect as soon as you save.

I usually sleep for a while before starting a service, because other systemd services, such as the database, need to finish starting first.
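
Instead of a fixed sleep, one could poll the dependency until it is ready; a sketch assuming a MySQL dependency (connection credentials omitted):

@reboot until mysqladmin ping --silent; do sleep 5; done; cd /data/gateway && sh handler.sh restart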

Check certificate expiration (on newer kubeadm releases the alpha prefix is dropped: kubeadm certs check-expiration):

kubeadm alpha certs check-expiration

Renew the certificates; in an HA cluster, run this on every master node:

kubeadm alpha certs renew all

If the certificates have expired, kubectl cannot be used; refresh the kubeconfig:

cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

Check whether kubectl is usable again.

If it is not, restart kube-apiserver, kube-controller-manager, kube-scheduler and etcd with the command below:

docker ps | grep -v pause | grep -E "etcd|scheduler|controller|apiserver" | awk '{print $1}' | awk '{print "docker","restart",$1}' | bash

Script

/*
 * Typora calls this script when an image is inserted; it uploads the image to MinIO.
 */

const path = require('path')
// minio for node.js
const Minio = require('minio')

const endPoint = "xxxx"
const port = xxxx
const accessKey = "xxxx"
const secretKey = "xxxx"
const bucketName = "typora"
const filePath = new Date().getFullYear() + "-" + (new Date().getMonth() + 1) + "/" + new Date().getDate()


// Parse the arguments to get the image paths (there may be several images)
const parseArgv = () => {
    const imageList = process.argv.slice(2).map(u => path.resolve(u))
    console.info("Selected files:", imageList)
    return imageList
}
// Bucket policy for permanent public URLs
const policy = () => {
    return {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {
                    "AWS": [
                        "*"
                    ]
                },
                "Action": [
                    "s3:GetBucketLocation",
                    "s3:ListBucket",
                    "s3:ListBucketMultipartUploads"
                ],
                "Resource": [
                    "arn:aws:s3:::" + bucketName
                ]
            },
            {
                "Effect": "Allow",
                "Principal": {
                    "AWS": [
                        "*"
                    ]
                },
                "Action": [
                    "s3:ListMultipartUploadParts",
                    "s3:PutObject",
                    "s3:AbortMultipartUpload",
                    "s3:DeleteObject",
                    "s3:GetObject"
                ],
                "Resource": [
                    "arn:aws:s3:::" + bucketName + "/*"
                ]
            }
        ]
    }
}

// Entry point
const uploadImageFile = (imageList = []) => {
    // Create the client connection
    const minioClient = new Minio.Client({
        endPoint: endPoint,
        port: port,
        useSSL: false,
        accessKey: accessKey,
        secretKey: secretKey
    })
    // Check whether the bucket exists
    minioClient.bucketExists(bucketName, function (err, exists) {
        if (!exists) {
            // Create the bucket
            minioClient.makeBucket(bucketName, '', function (err) {
                if (err) throw err
                // Set the bucket policy
                minioClient.setBucketPolicy(bucketName, JSON.stringify(policy()), function (err) {
                    if (err) throw err
                })
            })
        }
        // Upload the images
        imageList.map(image => {
            // Rename the image with a timestamp
            const name = `${Date.now()}${path.extname(image)}`
            // Upload the image to the bucket (the callback's first argument is the error)
            minioClient.fPutObject(bucketName, filePath + "/" + name, image, {}, function (err) {
                if (err) throw err
                const url = `http://${endPoint}:${port}/${bucketName}/${filePath}/${name}`
                // Typora picks up the script's output as the address and replaces the image link in the markdown
                console.log(url)
            })
        })
    })
}

// Run the script
uploadImageFile(parseArgv())

In the script's directory, run:
npm install minio

Configuration

Fill in the MinIO server settings at the top of the script.

Then configure Typora as follows:

(screenshot: Typora image-upload settings pointing at this script)

The script supports:

  • automatic bucket creation
  • batch upload of multiple files
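
You can also test the script from the command line before wiring it into Typora (the script filename upload.js and the image path below are illustrative):

node upload.js ./test.png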