first commit

main
曾威 3 years ago
commit acd7bd7c11

3
.gitignore vendored

@ -0,0 +1,3 @@
/harbor/harbor-offline-installer-v2.7.0.tgz
/uptime-kuma/uptime-kuma/kuma*

@ -0,0 +1,3 @@
{
"editor.acceptSuggestionOnEnter": "on"
}

@ -0,0 +1,21 @@
When using SwitchHosts for local DNS resolution, it must be launched with administrator privileges.
The nginx port inside Harbor uses a non-80 port; currently 6080 is used.
# Enable remote root login on Ubuntu
[Enable remote root login on Ubuntu](http://t.zoukankan.com/weifeng1463-p-15293210.html)
# Personal cloud drive: Cloudreve
https://docs.cloudreve.org/getting-started/install
# Configure a domestic registry mirror for Docker
https://blog.csdn.net/gengkui9897/article/details/127348289
# Private Maven repository: Nexus
[Deploying a Nexus private registry with docker-compose (CSDN)](https://blog.csdn.net/qiaohao0206/article/details/125471721)
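For the Docker registry mirror note above, a minimal sketch of what the linked post covers; the mirror URL below is only a placeholder and should be replaced with whichever mirror you actually use:
```
# Write (or merge into) the Docker daemon config, then restart the daemon.
sudo tee /etc/docker/daemon.json > /dev/null <<'EOF'
{
  "registry-mirrors": ["https://registry.docker-cn.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
docker info | grep -A 2 "Registry Mirrors"
```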

@ -0,0 +1,62 @@
1. Active Directory Domain Admin
2. Google Workspace Admin Console
3. GitLab
4. Liferay TFS
5. Smartfind Express
6. SLO/LEAP FE
7. Tririga
8. Moodle
9. Oracle MHC Document Express
10. MHC DSS
11. Filenexus
12. Lawson ADFS Integration
13. eDoc
14. Serv-U (FTP)
15. SchoolBooks
16. TaxFactory
17. Azure/Visual Studio/Microsoft
18. PowerBi/SSRS Reporting Server
19. Kinsey Clever Schoology
20. Illuminate
21. Seesaw
22. SchoolMessenger
23. Schoolmint/Whetstone
24. SchoolMint/SchoolChoice
25. Istation I-Ready
26. Azure/Intune/Office 365
27. Mosyle AWS/App Stream
28. Apple DEP
29. Apple Classroom Manager
30. Adobe Admin Console
31. Lucid Admin Console Confluence
32. Jira
33. DBAs and IC Super User
34. Lawson/Infor Super User
35. Enrich
36. Tangara
37. Specified Security applications
38. Tenable SC
39. And other applications as defined.
## More projects
### Template-based Word document generation
[https://github.com/Sayi/poi-tl](https://github.com/Sayi/poi-tl)
### Poster generation (Chinese)
[https://github.com/psoho/fast-poster](https://github.com/psoho/fast-poster)
### Visual HTML editing
[https://github.com/GrapesJS/grapesjs](https://github.com/GrapesJS/grapesjs)
### Workflow engine
[https://docs.camunda.org/manual/7.15/](https://docs.camunda.org/manual/7.15/)
### Workflow visualization
[https://bpmn.io/](https://bpmn.io/)

@ -0,0 +1,33 @@
version: "3.8"
services:
cloudreve:
container_name: cloudreve
image: cloudreve/cloudreve:latest
restart: unless-stopped
ports:
- "5212:5212"
volumes:
- temp_data:/data
- ./cloudreve/uploads:/cloudreve/uploads
- ./cloudreve/conf.ini:/cloudreve/conf.ini
- ./cloudreve/cloudreve.db:/cloudreve/cloudreve.db
- ./cloudreve/avatar:/cloudreve/avatar
depends_on:
- aria2
aria2:
container_name: aria2
image: p3terx/aria2-pro
restart: unless-stopped
environment:
- RPC_SECRET=your_aria_rpc_token
- RPC_PORT=6800
volumes:
- ./aria2/config:/config
- temp_data:/data
volumes:
temp_data:
driver: local
driver_opts:
type: none
device: $PWD/data
o: bind

@ -0,0 +1,2 @@
[Info] 2023-01-27 17:52:26 Admin user name: admin@cloudreve.org
[Info] 2023-01-27 17:52:26 Admin password: 6opLNrQt

@ -0,0 +1,77 @@
version: '3.3'
services:
master:
image: tikazyq/crawlab:latest
container_name: master
ports:
- "8080:8080" # frontend port mapping 前端端口映射
depends_on:
- mongo
- redis
volumes:
- "./master/log:/var/logs/crawlab" # log persistent 日志持久化
environment:
# CRAWLAB_API_ADDRESS: "https://<your_api_ip>:<your_api_port>" # backend API address 后端 API 地址. 适用于 https 或者源码部署
CRAWLAB_SERVER_MASTER: "Y" # whether to be master node 是否为主节点,主节点为 Y工作节点为 N
CRAWLAB_MONGO_HOST: "mongo" # MongoDB host address MongoDB 的地址,在 docker compose 网络中,直接引用服务名称
# CRAWLAB_MONGO_PORT: "27017" # MongoDB port MongoDB 的端口
# CRAWLAB_MONGO_DB: "crawlab_test" # MongoDB database MongoDB 的数据库
# CRAWLAB_MONGO_USERNAME: "username" # MongoDB username MongoDB 的用户名
# CRAWLAB_MONGO_PASSWORD: "password" # MongoDB password MongoDB 的密码
# CRAWLAB_MONGO_AUTHSOURCE: "admin" # MongoDB auth source MongoDB 的验证源
CRAWLAB_REDIS_ADDRESS: "redis" # Redis host address Redis 的地址,在 docker compose 网络中,直接引用服务名称
# CRAWLAB_REDIS_PORT: "6379" # Redis port Redis 的端口
# CRAWLAB_REDIS_DATABASE: "1" # Redis database Redis 的数据库
# CRAWLAB_REDIS_PASSWORD: "password" # Redis password Redis 的密码
# CRAWLAB_LOG_LEVEL: "info" # log level 日志级别. 默认为 info
# CRAWLAB_LOG_ISDELETEPERIODICALLY: "N" # whether to periodically delete log files 是否周期性删除日志文件. 默认不删除
# CRAWLAB_LOG_DELETEFREQUENCY: "@hourly" # frequency of deleting log files 删除日志文件的频率. 默认为每小时
# CRAWLAB_SERVER_REGISTER_TYPE: "mac" # node register type 节点注册方式. 默认为 mac 地址,也可设置为 ip防止 mac 地址冲突)
# CRAWLAB_SERVER_REGISTER_IP: "127.0.0.1" # node register ip 节点注册IP. 节点唯一识别号,只有当 CRAWLAB_SERVER_REGISTER_TYPE 为 "ip" 时才生效
# CRAWLAB_TASK_WORKERS: 8 # number of task executors 任务执行器个数(并行执行任务数)
# CRAWLAB_RPC_WORKERS: 16 # number of RPC workers RPC 工作协程个数
# CRAWLAB_SERVER_LANG_NODE: "Y" # whether to pre-install Node.js 预安装 Node.js 语言环境
# CRAWLAB_SERVER_LANG_JAVA: "Y" # whether to pre-install Java 预安装 Java 语言环境
# CRAWLAB_SETTING_ALLOWREGISTER: "N" # whether to allow user registration 是否允许用户注册
# CRAWLAB_SETTING_ENABLETUTORIAL: "N" # whether to enable tutorial 是否启用教程
# CRAWLAB_NOTIFICATION_MAIL_SERVER: smtp.exmaple.com # STMP server address STMP 服务器地址
# CRAWLAB_NOTIFICATION_MAIL_PORT: 465 # STMP server port STMP 服务器端口
# CRAWLAB_NOTIFICATION_MAIL_SENDEREMAIL: admin@exmaple.com # sender email 发送者邮箱
# CRAWLAB_NOTIFICATION_MAIL_SENDERIDENTITY: admin@exmaple.com # sender ID 发送者 ID
# CRAWLAB_NOTIFICATION_MAIL_SMTP_USER: username # SMTP username SMTP 用户名
# CRAWLAB_NOTIFICATION_MAIL_SMTP_PASSWORD: password # SMTP password SMTP 密码
worker:
image: tikazyq/crawlab:latest
container_name: worker
environment:
CRAWLAB_SERVER_MASTER: "N"
CRAWLAB_MONGO_HOST: "mongo"
CRAWLAB_REDIS_ADDRESS: "redis"
depends_on:
- mongo
- redis
# environment:
# MONGO_INITDB_ROOT_USERNAME: username
# MONGO_INITDB_ROOT_PASSWORD: password
volumes:
- "./worker/log:/var/logs/crawlab" # log persistent 日志持久化
mongo:
image: mongo:latest
restart: always
volumes:
- "./mongo/data/db:/data/db" # make data persistent 持久化
# ports:
# - "27017:27017" # expose port to host machine 暴露接口到宿主机
redis:
image: redis:latest
restart: always
# command: redis-server --requirepass "password" # set redis password 设置 Redis 密码
volumes:
- "./redis/data:/data" # make data persistent 持久化
# ports:
# - "6379:6379" # expose port to host machine 暴露接口到宿主机
# splash: # use Splash to run spiders on dynamic pages
# image: scrapinghub/splash
# container_name: splash
# ports:
# - "8050:8050"

@ -0,0 +1,29 @@
version: '3.3'
services:
master:
image: crawlabteam/crawlab
container_name: crawlab-master
ports:
- "8080:8080" # frontend port mapping 前端端口映射
depends_on:
- crawlab-mongo
volumes:
- "./master:/data"
environment:
CRAWLAB_NODE_MASTER: "Y" # Y: master node
CRAWLAB_MONGO_HOST: "crawlab-mongo" # mongo host address; inside the docker compose network, reference the service name directly
CRAWLAB_MONGO_PORT: "27017" # mongo port
CRAWLAB_MONGO_DB: "crawlab" # mongo database
CRAWLAB_MONGO_USERNAME: "username" # mongo username
CRAWLAB_MONGO_PASSWORD: "password" # mongo password
CRAWLAB_MONGO_AUTHSOURCE: "admin" # mongo auth source
crawlab-mongo:
image: mongo:4.2
container_name: crawlab-mongo
environment:
MONGO_INITDB_ROOT_USERNAME: "username" # mongo username
MONGO_INITDB_ROOT_PASSWORD: "password" # mongo password
volumes:
- "./mongo/data/db:/data/db" # 持久化 mongo 数据
# ports:
# - "27017:27017" # 开放 mongo 端口到宿主机

@ -0,0 +1,41 @@
192.168.31.249 portainer.test.com
192.168.31.249 portainer.local.com
192.168.31.249 cloudreve.local.com
192.168.31.249 gitlab.local.com
192.168.31.249 drone.local.com
192.168.31.249 harbor.local.com
192.168.31.249 homeassistant.local.com
192.168.31.249 mongodb.local.com
192.168.31.249 mysql.local.com
192.168.31.249 uptime-kuma.local.com
192.168.31.249 crawlab.local.com
192.168.31.249 portainer.windymuse.site
192.168.31.249 cloudreve.windymuse.site
192.168.31.249 gitlab.windymuse.site
192.168.31.249 drone.windymuse.site
192.168.31.249 harbor.windymuse.site
192.168.31.249 homeassistant.windymuse.site
192.168.31.249 mongodb.windymuse.site
192.168.31.249 mysql.windymuse.site
192.168.31.249 uptime-kuma.windymuse.site
192.168.31.249 crawlab.windymuse.site
192.168.31.249 portainer.windymuse.top
192.168.31.249 cloudreve.windymuse.top
192.168.31.249 gitlab.windymuse.top
192.168.31.249 drone.windymuse.top
192.168.31.249 harbor.windymuse.top
192.168.31.249 homeassistant.windymuse.top
192.168.31.249 mongodb.windymuse.top
192.168.31.249 mysql.windymuse.top
192.168.31.249 uptime-kuma.windymuse.top
192.168.31.249 crawlab.windymuse.top
192.168.31.249 portainer.windymuse.fun
192.168.31.249 cloudreve.windymuse.fun
192.168.31.249 gitlab.windymuse.fun
192.168.31.249 drone.windymuse.fun
192.168.31.249 harbor.windymuse.fun
192.168.31.249 homeassistant.windymuse.fun
192.168.31.249 mongodb.windymuse.fun
192.168.31.249 mysql.windymuse.fun
192.168.31.249 uptime-kuma.windymuse.fun
192.168.31.249 crawlab.windymuse.fun

@ -0,0 +1,20 @@
version: '2'
services:
registry:
image: jpillora/dnsmasq
# image: andyshinn/dnsmasq:latest
container_name: my_dns_server
restart: always
tty: true
cap_add:
- NET_ADMIN
ports:
- 192.168.31.249:53:53/tcp
- 192.168.31.249:53:53/udp
- 8090:8080
environment:
- HTTP_USER=admin
- HTTP_PASS=123456
volumes:
- ./dnsmasq_hosts:/etc/my_dnsmasq_hosts
- ./resolv.dnsmasq:/etc/my_resolv.dnsmasq

@ -0,0 +1,5 @@
https://www.jianshu.com/p/6f9203ace607
https://dandelioncloud.cn/article/details/1596683573434466305

@ -0,0 +1,2 @@
nameserver 114.114.114.114
nameserver 8.8.8.8

@ -0,0 +1,31 @@
# Deploying Drone with GitLab
[GitLab | Drone](https://docs.drone.io/server/provider/gitlab/)
[Setting up and using Drone with GitLab (yisu.com)](https://www.yisu.com/zixun/15849.html)
# Drone pipeline quickstart
[Docker Pipelines | Drone](https://docs.drone.io/quickstart/docker/)
# Deploying Spring Boot with Drone
[Drone CI/CD series (3): configuring the .drone.yml file for a Java (Spring Boot) project (CSDN)](https://blog.csdn.net/Bert_Chen/article/details/123611752)
# Deploying with Gitea
https://docs.drone.io/server/provider/gitea/
# Fixing the missing Trusted option on the settings page
https://blog.csdn.net/qq_35425070/article/details/106822191
# [Gitea & Drone webhook deliveries failing](https://www.cnblogs.com/shiningrise/p/16944140.html)
https://www.cnblogs.com/shiningrise/p/16944140.html
# CI/CD for Maven projects with Drone (including registry mirror configuration)
https://www.jianshu.com/p/65bdd465a5b2
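Tying these notes to the compose files below: the shared RPC secret is generated with openssl (the value is referenced by DRONE_RPC_SECRET in both the server and the runner), and the OAuth client id/secret come from the application registered in GitLab or Gitea. A minimal sketch; the /login callback path follows the Drone provider docs linked above:
```
# Generate the value used for DRONE_RPC_SECRET (must be identical on server and runner).
openssl rand -hex 16
# When registering the OAuth application in GitLab/Gitea, the redirect/callback URL
# is expected to be https://<your-drone-host>/login; copy the resulting client id and
# secret into DRONE_GITLAB_CLIENT_ID/SECRET or DRONE_GITEA_CLIENT_ID/SECRET.
```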

@ -0,0 +1,64 @@
# https://www.yisu.com/zixun/15849.html
# https://docs.drone.io/server/provider/gitlab/
version: '3'
services:
drone-server:
restart: always ## restart the container automatically
image: drone/drone:2
container_name: drone-server-cloudnative
networks: ## custom network; optional
- drone-net-cloudnative
ports:
- "9901:80" ## port mapped to the host
volumes:
- ./data:/var/lib/drone
environment:
- DRONE_SERVER_HOST=192.168.31.249:9901 ## the port must match the one mapped to the host
- DRONE_SERVER_PROTO=http
- DRONE_RPC_SECRET=7e52616bc93b86d114f95de534011d82 ## generated with: openssl rand -hex 16
# true allows open registration; false disables it, in which case only the accounts listed in DRONE_ADMIN can log in
- DRONE_OPEN=true
# Drone external address
- DRONE_HOST=https://drone.windymuse.site
# - DRONE_HOST=http://192.168.31.249:9901
# Use GitLab
- DRONE_GITLAB=true
# GitLab client ID
- DRONE_GITLAB_CLIENT_ID=193b8ca7beee9bcaa5dd8540e85e0f967d94177a1b4956648c25a15f380a9cad
# GitLab client secret
- DRONE_GITLAB_CLIENT_SECRET=103071657036dfacb44dedb049a2fd4a3cf874f16a4cf7af352641cf44edda61
# GitLab URL
- DRONE_GITLAB_SERVER=http://192.168.31.249:8929
# Grant admin privileges so the Trusted option is available
# https://blog.csdn.net/qq_35425070/article/details/106822146
# https://blog.csdn.net/qq_35425070/article/details/106822191
- DRONE_USER_CREATE=admin:true,token:gLBwGdBXUCTri6gEDDsLZW0ekoZqfcpq
drone-runner-docker:
restart: always ## restart the container automatically
image: drone/drone-runner-docker:1
container_name: drone-runner-docker-cloudnative
networks:
- drone-net-cloudnative
ports:
- "9902:3000" ## port mapped to the host; serves a web page showing task execution status; optional
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- DRONE_RPC_PROTO=http
- DRONE_RPC_HOST=drone-server-cloudnative
- DRONE_RPC_SECRET=7e52616bc93b86d114f95de534011d82 ## must match the value in the server service
- DRONE_RUNNER_NAME=docker-worker
- DRONE_RUNNER_CAPACITY=2
- TZ=Asia/Shanghai
- DRONE_DEBUG=true
#- DRONE_LOGS_DEBUG=true
#- DRONE_LOGS_TRACE=true
#- DRONE_UI_USERNAME=admin ## credentials for the runner web page
#- DRONE_UI_PASSWORD=password
depends_on:
- drone-server
networks: ## custom network; optional
drone-net-cloudnative:
driver: bridge
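A quick way to bring the stack up and check that the runner registered with the server; this is a hedged sketch, and the exact log wording may vary between drone-runner-docker versions:
```
docker compose up -d
# The runner normally logs that it pinged/connected to the remote server on startup.
docker logs drone-runner-docker-cloudnative 2>&1 | grep -i "ping"
docker logs drone-server-cloudnative --tail 20
```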

@ -0,0 +1,63 @@
# https://www.yisu.com/zixun/15849.html
# https://docs.drone.io/server/provider/gitlab/
version: '3'
services:
drone-server:
restart: always ## restart the container automatically
image: drone/drone:2
container_name: drone-server-cloudnative
networks: ## custom network; optional
- drone-net-cloudnative
ports:
- "9901:80" ## port mapped to the host
volumes:
- ./data:/var/lib/drone
environment:
- DRONE_SERVER_HOST=drone.windymuse.site ## the port must match the one mapped to the host
- DRONE_SERVER_PROTO=https
- DRONE_RPC_SECRET=7e52616bc93b86d114f95de534011d82 ## generated with: openssl rand -hex 16
# true allows open registration; false disables it, in which case only the accounts listed in DRONE_ADMIN can log in
- DRONE_OPEN=true
# Drone external address
- DRONE_HOST=https://drone.windymuse.site
# Use Gitea
- DRONE_GITEA=true
# Gitea client ID
- DRONE_GITEA_CLIENT_ID=fa766513-c894-47b2-8886-6f990a998647
# Gitea client secret
- DRONE_GITEA_CLIENT_SECRET=gto_jejijdu5ubefxudmno2w2zk5fiq3b2vztnrtgkhdinjrelklsfzq
# Gitea URL
- DRONE_GITEA_SERVER=https://gitea.windymuse.site
# Grant admin privileges so the Trusted option is available
# https://blog.csdn.net/qq_35425070/article/details/106822146
# https://blog.csdn.net/qq_35425070/article/details/106822191
# ,token:Ac7W1qgPyqxe9u00lI9E2IBrXpT8T6CA
- DRONE_USER_CREATE=username:maidong,admin:true
drone-runner-docker:
restart: always ## restart the container automatically
image: drone/drone-runner-docker:1
container_name: drone-runner-docker-cloudnative
networks:
- drone-net-cloudnative
ports:
- "9902:3000" ## port mapped to the host; serves a web page showing task execution status; optional
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- DRONE_RPC_PROTO=http
- DRONE_RPC_HOST=drone-server-cloudnative
- DRONE_RPC_SECRET=7e52616bc93b86d114f95de534011d82 ## must match the value in the server service
- DRONE_RUNNER_NAME=docker-worker
- DRONE_RUNNER_CAPACITY=2
- TZ=Asia/Shanghai
- DRONE_DEBUG=true
#- DRONE_LOGS_DEBUG=true
#- DRONE_LOGS_TRACE=true
- DRONE_UI_USERNAME=admin ## credentials for the runner web page
- DRONE_UI_PASSWORD=password
depends_on:
- drone-server
networks: ## custom network; optional
drone-net-cloudnative:
driver: bridge

@ -0,0 +1,15 @@
<settings xmlns="http://maven.apache.org/SETTINGS/1.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.1.0 http://maven.apache.org/xsd/settings-1.1.0.xsd">
<localRepository>/root/.m2/repository</localRepository>
<mirrors>
<mirror>
<id>alimaven</id>
<name>aliyun</name>
<mirrorOf>central</mirrorOf>
<url>http://maven.aliyun.com/nexus/content/groups/public</url>
</mirror>
</mirrors>
<pluginGroups>
<pluginGroup>org.apache.maven.plugins</pluginGroup>
<pluginGroup>org.codehaus.mojo</pluginGroup>
</pluginGroups>
</settings>
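To confirm the Aliyun mirror is actually picked up, the standard Maven help plugin can print the effective settings; a sketch assuming this file is saved as settings.xml in the current directory:
```
mvn -s settings.xml help:effective-settings | grep -A 3 "<mirror>"
```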

@ -0,0 +1,19 @@
version: "3"
networks:
default:
name: 'fastposter-net'
services:
server:
image: tangweixin/fast-poster
container_name: fast-poster
restart: always
ports:
- "5000:5000"
environment:
TZ: Asia/Shanghai
TOKEN: ApfrIzxCoK1DwNZOEJCwlrnv6QZ0PCdv
POSTER_URI_PREFIX: https://fast-poster.windymuse.site/
volumes:
- ./data:/app/data

@ -0,0 +1,43 @@
version: "3"
networks:
gitea:
external: false
services:
server:
image: gitea/gitea:1.18.1
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
- GITEA__database__DB_TYPE=mysql
- GITEA__database__HOST=gitea_db:3306
- GITEA__database__NAME=gitea
- GITEA__database__USER=gitea
- GITEA__database__PASSWD=gitea
restart: always
networks:
- gitea
volumes:
- ./gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3000:3000"
- "222:22"
depends_on:
- gitea_db
gitea_db:
image: mysql:8
restart: always
environment:
- MYSQL_ROOT_PASSWORD=gitea
- MYSQL_USER=gitea
- MYSQL_PASSWORD=gitea
- MYSQL_DATABASE=gitea
networks:
- gitea
volumes:
- ./mysql:/var/lib/mysql

@ -0,0 +1,8 @@
# [gitea &amp; drone webhook推送不成功](https://www.cnblogs.com/shiningrise/p/16944140.html)
https://www.cnblogs.com/shiningrise/p/16944140.html
用户名maidong
密码123456
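The linked post deals with webhook deliveries from Gitea to Drone failing. A common cause is Gitea's webhook host allowlist rejecting private addresses; a hedged sketch, assuming the compose file above (data volume ./gitea, so the config sits at ./gitea/gitea/conf/app.ini) and a Gitea version that supports [webhook] ALLOWED_HOST_LIST:
```
# Allow webhooks to reach hosts on the private network, then restart Gitea.
cat >> ./gitea/gitea/conf/app.ini <<'EOF'

[webhook]
ALLOWED_HOST_LIST = *
EOF
docker restart gitea
```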

@ -0,0 +1,13 @@
version: '3'
services:
# https://blog.csdn.net/fengtao0821/article/details/110526941
gitlab-runner:
image: gitlab/gitlab-runner
restart: always
container_name: gitlab-runner
user: root
privileged: true
volumes:
- ./config:/etc/gitlab-runner
- /var/run/docker.sock:/var/run/docker.sock

@ -0,0 +1,6 @@
## Because /var/run/docker.sock is mounted, the container needs elevated permissions; for example, run it as the root user by adding the following to the docker-compose file:
user: root
privileged: true
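After the container is up, the runner still has to be registered against GitLab. A sketch using the non-interactive registration command; the URL matches the GitLab compose file in this repo, and the token placeholder must be taken from GitLab's CI/CD runner settings:
```
docker compose exec gitlab-runner gitlab-runner register \
  --non-interactive \
  --url "http://192.168.31.249:8929" \
  --registration-token "<registration-token>" \
  --executor docker \
  --docker-image "docker:latest" \
  --description "docker-runner"
```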

@ -0,0 +1,24 @@
version: '3'
services:
gitlab:
image: 'gitlab/gitlab-ce:latest'
container_name: gitlab
restart: always
environment:
GITLAB_OMNIBUS_CONFIG: |
external_url 'http://192.168.31.249:8929' # use a domain name here if you have one
gitlab_rails['gitlab_shell_ssh_port'] = 2224
ports:
- '8929:8929'
- '2224:22'
logging:
driver: "json-file"
options:
max-size: 1024m
volumes:
# map the GitLab configuration into the config directory under the current directory
- './config:/etc/gitlab'
# map logs into the logs directory under the current directory
- './logs:/var/log/gitlab'
# map data into the data directory under the current directory
- './data:/var/opt/gitlab'

@ -0,0 +1,8 @@
Initial root password:
Inside the container it is in /etc/gitlab/initial_root_password.
On the host, look in the mounted file config/initial_root_password.
Password after changing it: gitlab@123
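To read or reset the password from the host, a sketch based on the volume mappings above; the gitlab-rake reset task is only available in reasonably recent GitLab versions:
```
# The generated file only exists for about 24 hours after the first start.
cat ./config/initial_root_password
# Alternatively, reset the root password interactively.
docker exec -it gitlab gitlab-rake "gitlab:password:reset[root]"
```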

@ -0,0 +1,23 @@
version: '3'
services:
grafana:
image: grafana/grafana
container_name: grafana
restart: always
user: root
ports:
- "3100:3000"
environment:
- HTTP_USER=admin
- HTTP_PASS=admin
- INFLUXDB_HOST=influxdb
- INFLUXDB_PORT=8186
- INFLUXDB_NAME=telegraf
- INFLUXDB_USER=root
- INFLUXDB_PASS=root
volumes:
- "./grafana:/var/lib/grafana"

@ -0,0 +1,5 @@
https://blog.csdn.net/weixin_46557630/article/details/127071444
https://blog.csdn.net/qq_36595568/article/details/124285925

@ -0,0 +1,123 @@
[Offline installation of Harbor v2 | AtomPi's Blog](https://blog.atompi.com/2020/08/03/%E7%A6%BB%E7%BA%BF%E5%AE%89%E8%A3%85%20Harbor%20v2/)
Default username:
admin
Default password: Harbor12345
### Start the deployment
* Download the offline installer
Download from: `https://github.com/vmware/harbor/releases`
Choose: `harbor-offline-installer-<version>.tgz`
* Extract the installer package
```
# tar -xf harbor-offline-installer-xxx.tgz
```
* Configure Harbor
The configuration template is `harbor.yml.tmpl`.
Copy the template file and name it `harbor.yml`.
There are two kinds of parameters in `harbor.yml`: required and optional.
* Required parameters: these must be set in the configuration file. If you update them in `harbor.yml` and run the `install.sh` script to reinstall Harbor, the changes take effect.
* Optional parameters: these are optional for updates, i.e. they can be left at their defaults and changed in the web UI after Harbor has started. If they are set in `harbor.yml`, they only take effect the first time Harbor starts; later updates made in the web UI will ignore these values in `harbor.yml`.
*Only some of the required parameters are listed here; see the official installation documentation linked at the end of the article for the full parameter reference.*
1. Required parameters:
* `hostname`: the domain name used to access Harbor. The host IP can be used, but a domain name is recommended.
`hostname = reg.atompi.com`
* `http / https`: the URL scheme used to access Harbor. If only `http` is used, simply comment out the `https` section (as below); to enable `https`, see [Configuring Harbor with HTTPS Access](https://goharbor.io/docs/2.0.0/install-config/configure-https/)
```
# http related config
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: 80
# https related config
#https:
# https port for harbor, default is 443
#port: 443
# The path of cert and key files for nginx
#certificate: /your/certificate/path
#private_key: /your/private/key/path
```
* `harbor_admin_password`: the login password for the super administrator account `admin`
* `database.password`: the root password of Harbor's default database. For production, change this to a strong password.
```
database:
# The password for the root user of Harbor DB. Change this before any production use.
password: root123
```
* `data_volume`: since all Harbor components are deployed as Docker containers, their data must be persisted to a host directory; this option sets the mount point. It is recommended to create a `harbor` directory somewhere and use it as the mount point, which makes management and migration easier.
```
data_volume: /data/harbor
```
### Run install.sh to start the installation
By default, the `install.sh` script without any arguments installs only the main Harbor services. Additional components can be installed by passing the corresponding flags:
```
Harbor with Notary (--with-notary): a project that allows anyone to have trust over arbitrary collections of data; used to run and interact with trusted collections.
Harbor with Clair (--with-clair): a security vulnerability scanner for Docker images
Harbor with Chart Repository Service (--with-chartmuseum): Helm chart repository support
```
Here we install Harbor with the Helm chart repository:
```
sudo ./install.sh --with-chartmuseum
```
### Log in to Harbor and configure Docker "insecure-registries"
* Web UI address: `http://reg.atompi.com`
* docker login
Edit the Docker daemon configuration file `/etc/docker/daemon.json` and add the following:
```
{
"insecure-registries": ["reg.atompi.com"]
}
```
Log in to the registry with Docker:
```
# docker login reg.atompi.com
```
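To verify the registry end to end, a hedged sketch of tagging and pushing an image; `library` is assumed to be the default public project that Harbor creates, and the hostname follows the example above:
```
docker login reg.atompi.com            # admin / Harbor12345 by default
docker tag nginx:latest reg.atompi.com/library/nginx:latest
docker push reg.atompi.com/library/nginx:latest
```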

@ -0,0 +1,58 @@
version: '3.7'
services:
telegraf:
image: telegraf
container_name: telegraf
restart: always
volumes:
- ./telegraf:/etc/telegraf
- /sys:/rootfs/sys:ro
- /proc:/rootfs/proc:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- /etc/localtime:/etc/localtime:ro
environment:
- 'HOST_PROC=/rootfs/proc'
- 'HOST_SYS=/rootfs/sys'
networks:
- monitor
depends_on:
- influxdb
influxdb:
# image: influxdb:2.0
image: influxdb:1.8
container_name: influxdb
restart: always
environment:
# - DOCKER_INFLUXDB_INIT_MODE=setup
# - DOCKER_INFLUXDB_INIT_USERNAME=my-user
# - DOCKER_INFLUXDB_INIT_PASSWORD=my-password
# - DOCKER_INFLUXDB_INIT_ORG=my-org
# - DOCKER_INFLUXDB_INIT_BUCKET=my-bucket
# - INFLUXDB_DB=db0
- INFLUXDB_HTTP_AUTH_ENABLED=true
- INFLUXDB_ADMIN_USER=telegraf
- INFLUXDB_ADMIN_PASSWORD=telegraf
# - PRE_CREATE_DB=telegraf
# - ADMIN_USER="telegraf"
# - INFLUXDB_INIT_PWD="telegraf"
# - GOGC=10
# - INFLUXDB_DATA_INDEX_VERSION=tsi1
ports:
- "8086:8086"
expose:
- "8090"
- "8099"
volumes:
# - "./influxdb/data:/var/lib/influxdb2"
# - "./influxdb/config:/etc/influxdb2"
- "./influxdb/db:/var/lib/influxdb"
networks:
- monitor
networks:
monitor:
name: 'influxdb-net'
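To check that InfluxDB accepted the admin credentials (and to create the telegraf database if it does not exist yet), a sketch using the influx CLI that ships in the influxdb:1.8 image:
```
docker exec -it influxdb influx -username telegraf -password telegraf -execute 'SHOW DATABASES'
docker exec -it influxdb influx -username telegraf -password telegraf -execute 'CREATE DATABASE telegraf'
```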

@ -0,0 +1 @@
https://blog.csdn.net/qq_32096997/article/details/115010804

@ -0,0 +1,30 @@
# https://juejin.cn/post/7018489702651002887
version: '3.3'
services:
mongodb:
container_name: mongodb
image: mongo:4.4.14
restart: always
ports:
- 27017:27017
volumes:
- ./data/db:/data/db
- ./data/log:/var/log/mongodb
- ./data/config:/etc/mongo
environment:
- MONGO_INITDB_ROOT_USERNAME=admin
- MONGO_INITDB_ROOT_PASSWORD=admin
mongo-express:
image: mongo-express:0.54.0
container_name: mongo-express
restart: always
ports:
- 8081:8081
environment:
- ME_CONFIG_MONGODB_ADMINUSERNAME=admin
- ME_CONFIG_MONGODB_ADMINPASSWORD=admin
- ME_CONFIG_MONGODB_SERVER=mongodb
networks:
default:
name: mongodb_network
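A quick authentication check against the root account defined above, using the legacy mongo shell that the mongo:4.4 image still ships:
```
docker exec -it mongodb mongo -u admin -p admin --authenticationDatabase admin \
  --eval 'db.adminCommand({ ping: 1 })'
```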

@ -0,0 +1,45 @@
version: '2'
services:
mariadb:
image: docker.io/bitnami/mariadb:10.6
restart: always
environment:
# ALLOW_EMPTY_PASSWORD is recommended only for development.
- ALLOW_EMPTY_PASSWORD=yes
- MARIADB_USER=bn_moodle
- MARIADB_DATABASE=bitnami_moodle
- MARIADB_CHARACTER_SET=utf8mb4
- MARIADB_COLLATE=utf8mb4_unicode_ci
volumes:
- 'mariadb_data:/bitnami/mariadb'
moodle:
image: docker.io/bitnami/moodle:4.1
restart: always
ports:
- '8180:8080'
- '8143:8443'
environment:
- MOODLE_USERNAME=user
- MOODLE_PASSWORD=bitnami
- MOODLE_DATABASE_HOST=mariadb
- MOODLE_DATABASE_PORT_NUMBER=3306
- MOODLE_DATABASE_USER=bn_moodle
- MOODLE_DATABASE_NAME=bitnami_moodle
- MOODLE_SITE_NAME=maidong
- MOODLE_HOST=moodle.windymuse.site
- MOODLE_REVERSEPROXY=true
- MOODLE_SSLPROXY=true
# ALLOW_EMPTY_PASSWORD is recommended only for development.
- ALLOW_EMPTY_PASSWORD=yes
volumes:
- 'moodle_data:/bitnami/moodle'
- 'moodledata_data:/bitnami/moodledata'
depends_on:
- mariadb
volumes:
mariadb_data:
driver: local
moodle_data:
driver: local
moodledata_data:
driver: local

@ -0,0 +1,3 @@
Deployment documentation:
https://hub.docker.com/r/bitnami/moodle

@ -0,0 +1,14 @@
version: '3.1'
services:
db:
container_name: mysql57
image: mysql:5.7
volumes:
- ./data/db:/var/lib/mysql
- ./etc/my.cnf:/etc/mysql/mysql.conf.d/mysqld.cnf
restart: always
ports:
- 33306:3306
environment:
MYSQL_ROOT_PASSWORD: 123456
secure_file_priv:
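A quick connectivity check, assuming the mapped port 33306 and the root password from the compose file:
```
# From the host, through the published port:
mysql -h 127.0.0.1 -P 33306 -uroot -p123456 -e "SHOW VARIABLES LIKE 'character_set_server';"
# Or directly inside the container:
docker exec -it mysql57 mysql -uroot -p123456 -e "SELECT VERSION();"
```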

@ -0,0 +1,48 @@
[mysqld]
character-set-server=utf8mb4
log-bin=mysql-bin
server-id=1
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
datadir = /var/lib/mysql
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
symbolic-links=0
secure_file_priv =
wait_timeout=120
interactive_timeout=120
default-time_zone = '+8:00'
skip-external-locking
skip-name-resolve
open_files_limit = 10240
max_connections = 1000
max_connect_errors = 6000
table_open_cache = 800
max_allowed_packet = 40m
sort_buffer_size = 2M
join_buffer_size = 1M
thread_cache_size = 32
query_cache_size = 64M
transaction_isolation = READ-COMMITTED
tmp_table_size = 128M
max_heap_table_size = 128M
log-bin = mysql-bin
sync-binlog = 1
binlog_format = ROW
binlog_cache_size = 1M
key_buffer_size = 128M
read_buffer_size = 2M
read_rnd_buffer_size = 4M
bulk_insert_buffer_size = 64M
lower_case_table_names = 1
explicit_defaults_for_timestamp=true
skip_name_resolve = ON
event_scheduler = ON
log_bin_trust_function_creators = 1
innodb_buffer_pool_size = 512M
innodb_flush_log_at_trx_commit = 1
innodb_file_per_table = 1
innodb_log_buffer_size = 4M
innodb_log_file_size = 256M
innodb_max_dirty_pages_pct = 90
innodb_read_io_threads = 4
innodb_write_io_threads = 4

@ -0,0 +1,11 @@
version: "3.9"
services:
nexus:
image: sonatype/nexus3:latest
container_name: nexus
restart: always
volumes:
# use the data directory under the current directory as the data volume
- ./data:/nexus-data
ports:
- "8082:8081"

@ -0,0 +1,6 @@
# Permissions must be granted first: chmod -R 777 .
The initial admin password is stored in the data volume, in the file admin.password under /nexus-data inside the container.
The admin password has been changed to admin123.
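The generated password can be read straight from the container (the file is removed once the password is changed through the web UI):
```
docker exec nexus cat /nexus-data/admin.password
```
Instead of chmod -R 777, handing the data directory to the container user (reportedly UID 200 in the sonatype/nexus3 image) should also work.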

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name cloudreve.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:5212;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name crawlab.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:8080;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name drone.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:9901;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name fast-poster.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:5000;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name gitea.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:3000;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name gitlab.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:8929;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name grafana.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:3100;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name harbor.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:6080;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name home-assistant.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:8123;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name mongo-express.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:8081;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name moodle.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:8180;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name nexus.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:8082;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name portainer.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:9009;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name postgresql-adminer.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:8888;
}
}

@ -0,0 +1,18 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name sonarqube.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:9000;
}
}

@ -0,0 +1,23 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name uptime-kuma.windymuse.site;
ssl_certificate /ssl/fullchain.crt;
ssl_certificate_key /ssl/private.pem;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
client_max_body_size 100m;
location / {
proxy_pass http://192.168.31.249:3001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
}
}

@ -0,0 +1,63 @@
user root;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
# https://blog.csdn.net/weixin_44972135/article/details/92806391
client_max_body_size 1024m;
client_body_buffer_size 10m;
client_header_buffer_size 10m;
proxy_buffers 4 128k;
proxy_busy_buffers_size 128k;
server {
listen 80;
server_name *.windymuse.site;
location / {
rewrite ^(.*)$ https://$host$1 permanent;
}
}
server {
listen 80;
server_name *.windymuse.top;
location / {
rewrite ^(.*)$ https://$host$1 permanent;
}
}
server {
listen 80;
server_name *.windymuse.fun;
location / {
rewrite ^(.*)$ https://$host$1 permanent;
}
}
include /etc/nginx/conf.d/*.conf;
}

@ -0,0 +1,50 @@
version: '3'
services:
nginx-proxy:
restart: always
image: nginx
container_name: nginx-proxy
ports:
- 80:80
- 443:443
volumes:
- ./data/conf.d:/etc/nginx/conf.d
- ./data/log:/var/log/nginx
- ./data/conf/nginx.conf:/etc/nginx/nginx.conf
- ./ssl:/ssl
# - /usr/local/myApp:/usr/share/nginx/html
# - /etc/letsencrypt:/etc/letsencrypt
# https://blog.csdn.net/qq_42700766/article/details/128492794
# username:admin
# password:admin
nginx-ui:
restart: always
image: crazyleojay/nginx_ui:latest
container_name: nginx-ui
ports:
- 8880:80
- 8889:8889
volumes:
- ./data/conf/nginx.conf:/install/nginx/conf/nginx.conf
- ./data/conf.d:/etc/nginx/conf.d
# https://blog.csdn.net/feifeiyechuan/article/details/115344837
# https://www.cnblogs.com/Yogile/p/15002756.html
# https://kevinmeng.fitit100.com/archives/shi-yong-nginxwebui-kuai-su-an-zhuang-he-pei-zhi-nginx
# username:admin
# password:admin/Admin123
nginx-web-ui:
restart: always
image: cym1102/nginxwebui:latest
container_name: nginx-web-ui
privileged: true
volumes:
- ./nginxWebUI:/home/nginxWebUI
- ./data/conf.d:/etc/nginx/conf.d
- ./data/log:/var/log/nginx
- ./data/conf/nginx.conf:/etc/nginx/nginx.conf
- ./ssl:/ssl
environment:
BOOT_OPTIONS: "--server.port=8899"
network_mode: "host"

@ -0,0 +1 @@
The certificates in ssl are valid for windymuse.top, *.windymuse.top, windymuse.site, *.windymuse.site, windymuse.fun and *.windymuse.fun
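This can be checked directly against the certificate file with openssl; a sketch assuming it is run from this directory:
```
openssl x509 -in ./ssl/fullchain.crt -noout -enddate
openssl x509 -in ./ssl/fullchain.crt -noout -text | grep -A 1 "Subject Alternative Name"
```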

@ -0,0 +1,32 @@
-----BEGIN CERTIFICATE-----
MIIFcjCCBFqgAwIBAgISA20ByOOc1b5C/yvbTOXmrX7rMA0GCSqGSIb3DQEBCwUA
MDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQD
EwJSMzAeFw0yMzAyMDUyMDM0MDVaFw0yMzA1MDYyMDM0MDRaMBgxFjAUBgNVBAMT
DXdpbmR5bXVzZS50b3AwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDF
VdsL4OvpVLh4UQ43cwP8bSIVJw/BjJPeKDDqZQu8n54vuPY9evvMQp626vfXW8/h
is2RzIrAcb3rHj/AFWhSUREstaHwviHkBn7zwUneHqHgBlgUoZ9eiAYPjV8oaYDw
+0grbcwG/bzThV7SNGJeyzVwU/VQJmB5JqPuyAn4iip4xpe8F3RyHgJYUVwSMlIA
yiocbhsXzLRP25S0mIJyW7KUwyINP3H2LvZR8A2KS+aaRISlT5QOyBlbpBJjbKsQ
d9+kkdTTy7v0hJqWXSRp1octeF301vX9KjDmLftLLbI9QrK1c7pr4T6E9/oxK8Y0
NG+DLeRiPw7H5OoZqtJZAgMBAAGjggKaMIICljAOBgNVHQ8BAf8EBAMCBaAwHQYD
VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0O
BBYEFBo5kDgeqx9VTXnikannqfzQiaKKMB8GA1UdIwQYMBaAFBQusxe3WFbLrlAJ
QOYfr52LFMLGMFUGCCsGAQUFBwEBBEkwRzAhBggrBgEFBQcwAYYVaHR0cDovL3Iz
Lm8ubGVuY3Iub3JnMCIGCCsGAQUFBzAChhZodHRwOi8vcjMuaS5sZW5jci5vcmcv
MGsGA1UdEQRkMGKCDyoud2luZHltdXNlLmZ1boIQKi53aW5keW11c2Uuc2l0ZYIP
Ki53aW5keW11c2UudG9wgg13aW5keW11c2UuZnVugg53aW5keW11c2Uuc2l0ZYIN
d2luZHltdXNlLnRvcDBMBgNVHSAERTBDMAgGBmeBDAECATA3BgsrBgEEAYLfEwEB
ATAoMCYGCCsGAQUFBwIBFhpodHRwOi8vY3BzLmxldHNlbmNyeXB0Lm9yZzCCAQMG
CisGAQQB1nkCBAIEgfQEgfEA7wB1AHoyjFTYty22IOo44FIe6YQWcDIThU070ivB
OlejUutSAAABhiOAJCMAAAQDAEYwRAIgAwOZRKfr7aNRx8ce3ITEuRSdiStvmn4Y
EnRHEoiI6EkCIHJoJUrIEDqxcrBU9oWpYUGyyNiod+RkFK0eGZ+Dp45zAHYA6D7Q
2j71BjUy51covIlryQPTy9ERa+zraeF3fW0GvW4AAAGGI4AkEwAABAMARzBFAiBE
MIwKdMz2ASinPTBilXPjfxL5drx0AaCfnLvNlYeWKAIhAIOokwFKSXNTo9mOCmsv
A/xKN5IMZEIkmq9ZH3yN2uFWMA0GCSqGSIb3DQEBCwUAA4IBAQAr+T18EScuvHXY
rh+OdNz6TkeAwZEE94274GFMyQmZfCy4YFU3gOkBS6Fr9UyqT74nicuqx2BWEHyZ
wxKFBYoSdxzQzLQaAwYWKTI5lJ5YzL+Na3UEgFQMgnQAu33WUmN1TGaDZRkU7nwt
CBB5IHrMggdmuqWhpTZNjLwESVywpsnBj3AVFTgiVeNo9JflcEzBtnztYxtDKuLa
AatN2y3g7JG8ubxJIK4W+nbgWMadMz9Z6unpRl/G+lxF+5elbl3Ecc5nysW+nBNV
P3QjplirAFYhvguRs0qIdt9KZerdVqmtcF2lI2AoIwLP4w89CaSw3nTBA3LYyyzx
fz1vLQXy
-----END CERTIFICATE-----

Binary file not shown.

@ -0,0 +1,30 @@
-----BEGIN CERTIFICATE-----
MIIFFjCCAv6gAwIBAgIRAJErCErPDBinU/bWLiWnX1owDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjAwOTA0MDAwMDAw
WhcNMjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
RW5jcnlwdDELMAkGA1UEAxMCUjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC7AhUozPaglNMPEuyNVZLD+ILxmaZ6QoinXSaqtSu5xUyxr45r+XXIo9cP
R5QUVTVXjJ6oojkZ9YI8QqlObvU7wy7bjcCwXPNZOOftz2nwWgsbvsCUJCWH+jdx
sxPnHKzhm+/b5DtFUkWWqcFTzjTIUu61ru2P3mBw4qVUq7ZtDpelQDRrK9O8Zutm
NHz6a4uPVymZ+DAXXbpyb/uBxa3Shlg9F8fnCbvxK/eG3MHacV3URuPMrSXBiLxg
Z3Vms/EY96Jc5lP/Ooi2R6X/ExjqmAl3P51T+c8B5fWmcBcUr2Ok/5mzk53cU6cG
/kiFHaFpriV1uxPMUgP17VGhi9sVAgMBAAGjggEIMIIBBDAOBgNVHQ8BAf8EBAMC
AYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBIGA1UdEwEB/wQIMAYB
Af8CAQAwHQYDVR0OBBYEFBQusxe3WFbLrlAJQOYfr52LFMLGMB8GA1UdIwQYMBaA
FHm0WeZ7tuXkAXOACIjIGlj26ZtuMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcw
AoYWaHR0cDovL3gxLmkubGVuY3Iub3JnLzAnBgNVHR8EIDAeMBygGqAYhhZodHRw
Oi8veDEuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYGZ4EMAQIBMA0GCysGAQQB
gt8TAQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCFyk5HPqP3hUSFvNVneLKYY611TR6W
PTNlclQtgaDqw+34IL9fzLdwALduO/ZelN7kIJ+m74uyA+eitRY8kc607TkC53wl
ikfmZW4/RvTZ8M6UK+5UzhK8jCdLuMGYL6KvzXGRSgi3yLgjewQtCPkIVz6D2QQz
CkcheAmCJ8MqyJu5zlzyZMjAvnnAT45tRAxekrsu94sQ4egdRCnbWSDtY7kh+BIm
lJNXoB1lBMEKIq4QDUOXoRgffuDghje1WrG9ML+Hbisq/yFOGwXD9RiX8F6sw6W4
avAuvDszue5L3sz85K+EC4Y/wFVDNvZo4TYXao6Z0f+lQKc0t8DQYzk1OXVu8rp2
yJMC6alLbBfODALZvYH7n7do1AZls4I9d1P4jnkDrQoxB3UqQ9hVl3LEKQ73xF1O
yK5GhDDX8oVfGKF5u+decIsH4YaTw7mP3GFxJSqv3+0lUFJoi5Lc5da149p90Ids
hCExroL1+7mryIkXPeFM5TgO9r0rvZaBFOvV2z0gp35Z0+L4WPlbuEjN/lxPFin+
HlUjr8gRsI3qfJOQFy/9rKIJR0Y/8Omwt/8oTWgy1mdeHmmjk7j1nYsvC9JSQ6Zv
MldlTTKB3zhThV1+XWYp6rjd5JW1zbVWEkLNxE7GJThEUG3szgBVGP7pSWTUTsqX
nLRbwHOoq7hHwg==
-----END CERTIFICATE-----

@ -0,0 +1,31 @@
-----BEGIN CERTIFICATE-----
MIIFYDCCBEigAwIBAgIQQAF3ITfU6UK47naqPGQKtzANBgkqhkiG9w0BAQsFADA/
MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
DkRTVCBSb290IENBIFgzMB4XDTIxMDEyMDE5MTQwM1oXDTI0MDkzMDE4MTQwM1ow
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwggIiMA0GCSqGSIb3DQEB
AQUAA4ICDwAwggIKAoICAQCt6CRz9BQ385ueK1coHIe+3LffOJCMbjzmV6B493XC
ov71am72AE8o295ohmxEk7axY/0UEmu/H9LqMZshftEzPLpI9d1537O4/xLxIZpL
wYqGcWlKZmZsj348cL+tKSIG8+TA5oCu4kuPt5l+lAOf00eXfJlII1PoOK5PCm+D
LtFJV4yAdLbaL9A4jXsDcCEbdfIwPPqPrt3aY6vrFk/CjhFLfs8L6P+1dy70sntK
4EwSJQxwjQMpoOFTJOwT2e4ZvxCzSow/iaNhUd6shweU9GNx7C7ib1uYgeGJXDR5
bHbvO5BieebbpJovJsXQEOEO3tkQjhb7t/eo98flAgeYjzYIlefiN5YNNnWe+w5y
sR2bvAP5SQXYgd0FtCrWQemsAXaVCg/Y39W9Eh81LygXbNKYwagJZHduRze6zqxZ
Xmidf3LWicUGQSk+WT7dJvUkyRGnWqNMQB9GoZm1pzpRboY7nn1ypxIFeFntPlF4
FQsDj43QLwWyPntKHEtzBRL8xurgUBN8Q5N0s8p0544fAQjQMNRbcTa0B7rBMDBc
SLeCO5imfWCKoqMpgsy6vYMEG6KDA0Gh1gXxG8K28Kh8hjtGqEgqiNx2mna/H2ql
PRmP6zjzZN7IKw0KKP/32+IVQtQi0Cdd4Xn+GOdwiK1O5tmLOsbdJ1Fu/7xk9TND
TwIDAQABo4IBRjCCAUIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
SwYIKwYBBQUHAQEEPzA9MDsGCCsGAQUFBzAChi9odHRwOi8vYXBwcy5pZGVudHJ1
c3QuY29tL3Jvb3RzL2RzdHJvb3RjYXgzLnA3YzAfBgNVHSMEGDAWgBTEp7Gkeyxx
+tvhS5B1/8QVYIWJEDBUBgNVHSAETTBLMAgGBmeBDAECATA/BgsrBgEEAYLfEwEB
ATAwMC4GCCsGAQUFBwIBFiJodHRwOi8vY3BzLnJvb3QteDEubGV0c2VuY3J5cHQu
b3JnMDwGA1UdHwQ1MDMwMaAvoC2GK2h0dHA6Ly9jcmwuaWRlbnRydXN0LmNvbS9E
U1RST09UQ0FYM0NSTC5jcmwwHQYDVR0OBBYEFHm0WeZ7tuXkAXOACIjIGlj26Ztu
MA0GCSqGSIb3DQEBCwUAA4IBAQAKcwBslm7/DlLQrt2M51oGrS+o44+/yQoDFVDC
5WxCu2+b9LRPwkSICHXM6webFGJueN7sJ7o5XPWioW5WlHAQU7G75K/QosMrAdSW
9MUgNTP52GE24HGNtLi1qoJFlcDyqSMo59ahy2cI2qBDLKobkx/J3vWraV0T9VuG
WCLKTVXkcGdtwlfFRjlBz4pYg1htmf5X6DYO8A4jqv2Il9DjXA6USbW1FzXSLr9O
he8Y4IWS6wY7bCkjCWDcRQJMEhg76fsO3txE+FiYruq9RUWhiF1myv4Q6W+CyBFC
Dfvp7OOGAN6dEOM4+qR9sdjoSYKEBpsr6GtPAQw4dy753ec5
-----END CERTIFICATE-----

@ -0,0 +1,28 @@
******************************************************************
[来此加密 (Let's Encrypt certificate service)]
https://letsencrypt.osfipin.com/
******************************************************************
1. Domains:
windymuse.top,*.windymuse.top,windymuse.site,*.windymuse.site,windymuse.fun,*.windymuse.fun
------------------------------------------------------------------
2. Order details:
https://acme-v02.api.letsencrypt.org/acme/order/303940600/163098956576
------------------------------------------------------------------
3. Expiration date:
2023-05-07 04:34:04
------------------------------------------------------------------
4. File descriptions:
fullchain.crt: certificate plus certificate chain
certificate.crt: certificate
chain.crt: certificate chain
chain_old.crt: certificate chain for older systems
private.pem: private key (keep it safe)
public.pem: public key
certificate.pfx: certificate in PFX format (use the PFX import password)
------------------------------------------------------------------
5. PFX import password:
20f31a
******************************************************************
Contact email:
support@mail.osfipin.com
******************************************************************

@ -0,0 +1,63 @@
-----BEGIN CERTIFICATE-----
MIIFcjCCBFqgAwIBAgISA20ByOOc1b5C/yvbTOXmrX7rMA0GCSqGSIb3DQEBCwUA
MDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQD
EwJSMzAeFw0yMzAyMDUyMDM0MDVaFw0yMzA1MDYyMDM0MDRaMBgxFjAUBgNVBAMT
DXdpbmR5bXVzZS50b3AwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDF
VdsL4OvpVLh4UQ43cwP8bSIVJw/BjJPeKDDqZQu8n54vuPY9evvMQp626vfXW8/h
is2RzIrAcb3rHj/AFWhSUREstaHwviHkBn7zwUneHqHgBlgUoZ9eiAYPjV8oaYDw
+0grbcwG/bzThV7SNGJeyzVwU/VQJmB5JqPuyAn4iip4xpe8F3RyHgJYUVwSMlIA
yiocbhsXzLRP25S0mIJyW7KUwyINP3H2LvZR8A2KS+aaRISlT5QOyBlbpBJjbKsQ
d9+kkdTTy7v0hJqWXSRp1octeF301vX9KjDmLftLLbI9QrK1c7pr4T6E9/oxK8Y0
NG+DLeRiPw7H5OoZqtJZAgMBAAGjggKaMIICljAOBgNVHQ8BAf8EBAMCBaAwHQYD
VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0O
BBYEFBo5kDgeqx9VTXnikannqfzQiaKKMB8GA1UdIwQYMBaAFBQusxe3WFbLrlAJ
QOYfr52LFMLGMFUGCCsGAQUFBwEBBEkwRzAhBggrBgEFBQcwAYYVaHR0cDovL3Iz
Lm8ubGVuY3Iub3JnMCIGCCsGAQUFBzAChhZodHRwOi8vcjMuaS5sZW5jci5vcmcv
MGsGA1UdEQRkMGKCDyoud2luZHltdXNlLmZ1boIQKi53aW5keW11c2Uuc2l0ZYIP
Ki53aW5keW11c2UudG9wgg13aW5keW11c2UuZnVugg53aW5keW11c2Uuc2l0ZYIN
d2luZHltdXNlLnRvcDBMBgNVHSAERTBDMAgGBmeBDAECATA3BgsrBgEEAYLfEwEB
ATAoMCYGCCsGAQUFBwIBFhpodHRwOi8vY3BzLmxldHNlbmNyeXB0Lm9yZzCCAQMG
CisGAQQB1nkCBAIEgfQEgfEA7wB1AHoyjFTYty22IOo44FIe6YQWcDIThU070ivB
OlejUutSAAABhiOAJCMAAAQDAEYwRAIgAwOZRKfr7aNRx8ce3ITEuRSdiStvmn4Y
EnRHEoiI6EkCIHJoJUrIEDqxcrBU9oWpYUGyyNiod+RkFK0eGZ+Dp45zAHYA6D7Q
2j71BjUy51covIlryQPTy9ERa+zraeF3fW0GvW4AAAGGI4AkEwAABAMARzBFAiBE
MIwKdMz2ASinPTBilXPjfxL5drx0AaCfnLvNlYeWKAIhAIOokwFKSXNTo9mOCmsv
A/xKN5IMZEIkmq9ZH3yN2uFWMA0GCSqGSIb3DQEBCwUAA4IBAQAr+T18EScuvHXY
rh+OdNz6TkeAwZEE94274GFMyQmZfCy4YFU3gOkBS6Fr9UyqT74nicuqx2BWEHyZ
wxKFBYoSdxzQzLQaAwYWKTI5lJ5YzL+Na3UEgFQMgnQAu33WUmN1TGaDZRkU7nwt
CBB5IHrMggdmuqWhpTZNjLwESVywpsnBj3AVFTgiVeNo9JflcEzBtnztYxtDKuLa
AatN2y3g7JG8ubxJIK4W+nbgWMadMz9Z6unpRl/G+lxF+5elbl3Ecc5nysW+nBNV
P3QjplirAFYhvguRs0qIdt9KZerdVqmtcF2lI2AoIwLP4w89CaSw3nTBA3LYyyzx
fz1vLQXy
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFFjCCAv6gAwIBAgIRAJErCErPDBinU/bWLiWnX1owDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjAwOTA0MDAwMDAw
WhcNMjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
RW5jcnlwdDELMAkGA1UEAxMCUjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC7AhUozPaglNMPEuyNVZLD+ILxmaZ6QoinXSaqtSu5xUyxr45r+XXIo9cP
R5QUVTVXjJ6oojkZ9YI8QqlObvU7wy7bjcCwXPNZOOftz2nwWgsbvsCUJCWH+jdx
sxPnHKzhm+/b5DtFUkWWqcFTzjTIUu61ru2P3mBw4qVUq7ZtDpelQDRrK9O8Zutm
NHz6a4uPVymZ+DAXXbpyb/uBxa3Shlg9F8fnCbvxK/eG3MHacV3URuPMrSXBiLxg
Z3Vms/EY96Jc5lP/Ooi2R6X/ExjqmAl3P51T+c8B5fWmcBcUr2Ok/5mzk53cU6cG
/kiFHaFpriV1uxPMUgP17VGhi9sVAgMBAAGjggEIMIIBBDAOBgNVHQ8BAf8EBAMC
AYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBIGA1UdEwEB/wQIMAYB
Af8CAQAwHQYDVR0OBBYEFBQusxe3WFbLrlAJQOYfr52LFMLGMB8GA1UdIwQYMBaA
FHm0WeZ7tuXkAXOACIjIGlj26ZtuMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcw
AoYWaHR0cDovL3gxLmkubGVuY3Iub3JnLzAnBgNVHR8EIDAeMBygGqAYhhZodHRw
Oi8veDEuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYGZ4EMAQIBMA0GCysGAQQB
gt8TAQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCFyk5HPqP3hUSFvNVneLKYY611TR6W
PTNlclQtgaDqw+34IL9fzLdwALduO/ZelN7kIJ+m74uyA+eitRY8kc607TkC53wl
ikfmZW4/RvTZ8M6UK+5UzhK8jCdLuMGYL6KvzXGRSgi3yLgjewQtCPkIVz6D2QQz
CkcheAmCJ8MqyJu5zlzyZMjAvnnAT45tRAxekrsu94sQ4egdRCnbWSDtY7kh+BIm
lJNXoB1lBMEKIq4QDUOXoRgffuDghje1WrG9ML+Hbisq/yFOGwXD9RiX8F6sw6W4
avAuvDszue5L3sz85K+EC4Y/wFVDNvZo4TYXao6Z0f+lQKc0t8DQYzk1OXVu8rp2
yJMC6alLbBfODALZvYH7n7do1AZls4I9d1P4jnkDrQoxB3UqQ9hVl3LEKQ73xF1O
yK5GhDDX8oVfGKF5u+decIsH4YaTw7mP3GFxJSqv3+0lUFJoi5Lc5da149p90Ids
hCExroL1+7mryIkXPeFM5TgO9r0rvZaBFOvV2z0gp35Z0+L4WPlbuEjN/lxPFin+
HlUjr8gRsI3qfJOQFy/9rKIJR0Y/8Omwt/8oTWgy1mdeHmmjk7j1nYsvC9JSQ6Zv
MldlTTKB3zhThV1+XWYp6rjd5JW1zbVWEkLNxE7GJThEUG3szgBVGP7pSWTUTsqX
nLRbwHOoq7hHwg==
-----END CERTIFICATE-----

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDFVdsL4OvpVLh4
UQ43cwP8bSIVJw/BjJPeKDDqZQu8n54vuPY9evvMQp626vfXW8/his2RzIrAcb3r
Hj/AFWhSUREstaHwviHkBn7zwUneHqHgBlgUoZ9eiAYPjV8oaYDw+0grbcwG/bzT
hV7SNGJeyzVwU/VQJmB5JqPuyAn4iip4xpe8F3RyHgJYUVwSMlIAyiocbhsXzLRP
25S0mIJyW7KUwyINP3H2LvZR8A2KS+aaRISlT5QOyBlbpBJjbKsQd9+kkdTTy7v0
hJqWXSRp1octeF301vX9KjDmLftLLbI9QrK1c7pr4T6E9/oxK8Y0NG+DLeRiPw7H
5OoZqtJZAgMBAAECggEAEXz6XKMiRRts/byxNdjLkaNaUeWUT+LVd+q36w3pbF34
9uG9cNVlW0TMLAnGzR8l3Scf8Lvf0A5EdoHHxI4hIKpuizfNkjhHaJ3qwk9u7HBq
iCAHzkxEg2wqnVY6CKrQ8opSawbf/f6erWA0dwyNazk3Ey1doCzGf/YcsRypRY1R
EKB5X4U8buT03MwQGCnU242XYW9CB+pkRsA8lADAmatJ9WlUQ0lujw5ftX3lHESx
gHuzZbJxQsqGqmLq9RGPRY54KV1E2fIVYBj+c2uMxAf2Rz4N3SJt1LhPElHeZ0sm
XHfx0rBAjXgzE0iTKz6si5wL9dOp5ips/TNpJ8kYwQKBgQD3axGSfIpPQ8876t+M
MiQRYAvt2rrWTZnj4Ijahm4jI/dcZ2q1+1gisJ6uTv966phd9rQf7ULtJeHspn8z
w9hnh/JjEn6RPZUlsql5fjyYT0T0tBvoC8DJwegT1JnBu05KQbSzt0bcGpL46Cui
EETeg8nvRqV5pF66FdkVeK1ymwKBgQDMLhR91/F3gt/jQ8uDhg2ts60uMeR7e160
4V9GVca7O4gkx6WSTT6tZiO6y1T0NBLWsRY3hbhsJQeb6wZPG9qYjY7BVpihxSiD
HqqIN2v/x9kNOEnmQTpck9jd1Ze4Ga1wHBHzPY4MQhwtIN5lXA93GocU9VIgQkUK
T0u6Cqz0GwKBgDWuIjrhEHUQJkc+t0/IlTC74PqzKTRT096wGaEn539yTznaKjnf
lYi5P0RcWWvGuFYyr3P52zpurKzWluSZSjt0MG+0VIeYj+KE9+YnMGhvUmuxJBjd
a++aRYr/5ng09lCAHv+a9SHOFjMw1D6uPvL0LvzeHArfUboYAWh6LU+fAoGAYvN+
CNLregDaIhjL+2yvGlx2dGnHkjRi3IRQmRSe4ERkoXaD/lgjJQPYEwHRpRvLeXKP
Es9iZj635k9LWnFkSgRoh7hA9j1HeewVtzgsxeQDg82gus8nyo/4TOHjylvfWvKv
nFNbkQNFBnXkcfjgodcgWf7eEzYqoWl2iK39zbsCgYEA282e6wVhosqASeBZHf5t
AWGZdPBCdgXMJjUcn0hAJoN5rKgPxcHqvjj7RmPLgrFLSzuFfkKtAAG5SPY3uB1y
xMAOnrBhWwQOuBTgmezBzCkecgqEOmVQUlGSdEnPDTycpoZDYIvPqcVY1l4c/vLG
l0Ev8CfJhtpY3EinDwDL0lE=
-----END PRIVATE KEY-----

@ -0,0 +1,9 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxVXbC+Dr6VS4eFEON3MD
/G0iFScPwYyT3igw6mULvJ+eL7j2PXr7zEKetur311vP4YrNkcyKwHG96x4/wBVo
UlERLLWh8L4h5AZ+88FJ3h6h4AZYFKGfXogGD41fKGmA8PtIK23MBv2804Ve0jRi
Xss1cFP1UCZgeSaj7sgJ+IoqeMaXvBd0ch4CWFFcEjJSAMoqHG4bF8y0T9uUtJiC
cluylMMiDT9x9i72UfANikvmmkSEpU+UDsgZW6QSY2yrEHffpJHU08u79ISall0k
adaHLXhd9Nb1/Sow5i37Sy2yPUKytXO6a+E+hPf6MSvGNDRvgy3kYj8Ox+TqGarS
WQIDAQAB
-----END PUBLIC KEY-----

@ -0,0 +1,18 @@
#!/bin/bash
cat ./data/conf.d/*.conf | grep "server_name" | grep -v "#" >> 1.txt # change this to your nginx conf path
sed -i "s#server_name##g" 1.txt
sed -i "s#;##g" 1.txt
DomainList=`cat 1.txt | xargs`
for i in $DomainList
do
DomainResults=`dig $i |grep "192.168.31.249" | wc -l`
if [ $DomainResults -eq 0 ];then
echo "$i does not resolve" >> ErrorDomainList.txt # domains without a DNS record are written to this file
else
echo "$i" >> 3.txt # domains that do resolve are written to this file
fi
done

@ -0,0 +1,31 @@
# Deploying Drone with GitLab
[GitLab | Drone](https://docs.drone.io/server/provider/gitlab/)
[Setting up and using Drone with GitLab (yisu.com)](https://www.yisu.com/zixun/15849.html)
# Drone pipeline quickstart
[Docker Pipelines | Drone](https://docs.drone.io/quickstart/docker/)
# Deploying Spring Boot with Drone
[Drone CI/CD series (3): configuring the .drone.yml file for a Java (Spring Boot) project (CSDN)](https://blog.csdn.net/Bert_Chen/article/details/123611752)
# Deploying with Gitea
https://docs.drone.io/server/provider/gitea/
# Fixing the missing Trusted option on the settings page
https://blog.csdn.net/qq_35425070/article/details/106822191
# [Gitea & Drone webhook deliveries failing](https://www.cnblogs.com/shiningrise/p/16944140.html)
https://www.cnblogs.com/shiningrise/p/16944140.html
# CI/CD for Maven projects with Drone (including registry mirror configuration)
https://www.jianshu.com/p/65bdd465a5b2

@ -0,0 +1,63 @@
# https://www.yisu.com/zixun/15849.html
# https://docs.drone.io/server/provider/gitlab/
version: '3'
services:
drone-server:
restart: always ## restart the container automatically
image: drone/drone:2
container_name: drone-server-cloudnative
networks: ## custom network; optional
- drone-net-cloudnative
ports:
- "9901:80" ## port mapped to the host
volumes:
- ./data:/var/lib/drone
environment:
- DRONE_SERVER_HOST=drone.windymuse.com.cn ## the port must match the one mapped to the host
- DRONE_SERVER_PROTO=https
- DRONE_RPC_SECRET=7e52616bc93b86d114f95de534011d82 ## generated with: openssl rand -hex 16
# true allows open registration; false disables it, in which case only the accounts listed in DRONE_ADMIN can log in
- DRONE_OPEN=true
# Drone external address
- DRONE_HOST=https://drone.windymuse.com.cn
# Use Gitea
- DRONE_GITEA=true
# Gitea client ID
- DRONE_GITEA_CLIENT_ID=d1da07a5-1033-467b-b579-a6a411b20f0f
# Gitea client secret
- DRONE_GITEA_CLIENT_SECRET=gto_ujniibp4ju546mnptk76homqkj5kcbtsuqgpu2dqdztdzkffcwkq
# Gitea URL
- DRONE_GITEA_SERVER=https://gitea.windymuse.com.cn
# Grant admin privileges so the Trusted option is available
# https://blog.csdn.net/qq_35425070/article/details/106822146
# https://blog.csdn.net/qq_35425070/article/details/106822191
# ,token:Ac7W1qgPyqxe9u00lI9E2IBrXpT8T6CA
- DRONE_USER_CREATE=username:maidong,admin:true
drone-runner-docker:
restart: always ## restart the container automatically
image: drone/drone-runner-docker:1
container_name: drone-runner-docker-cloudnative
networks:
- drone-net-cloudnative
ports:
- "9902:3000" ## port mapped to the host; serves a web page showing task execution status; optional
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- DRONE_RPC_PROTO=http
- DRONE_RPC_HOST=drone-server-cloudnative
- DRONE_RPC_SECRET=7e52616bc93b86d114f95de534011d82 ## must match the value in the server service
- DRONE_RUNNER_NAME=docker-worker
- DRONE_RUNNER_CAPACITY=2
- TZ=Asia/Shanghai
- DRONE_DEBUG=true
#- DRONE_LOGS_DEBUG=true
#- DRONE_LOGS_TRACE=true
- DRONE_UI_USERNAME=admin ## credentials for the runner web page
- DRONE_UI_PASSWORD=password
depends_on:
- drone-server
networks: ## custom network; optional
drone-net-cloudnative:
driver: bridge

@ -0,0 +1,15 @@
<settings xmlns="http://maven.apache.org/SETTINGS/1.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.1.0 http://maven.apache.org/xsd/settings-1.1.0.xsd">
<localRepository>/root/.m2/repository</localRepository>
<mirrors>
<mirror>
<id>alimaven</id>
<name>aliyun</name>
<mirrorOf>central</mirrorOf>
<url>http://maven.aliyun.com/nexus/content/groups/public</url>
</mirror>
</mirrors>
<pluginGroups>
<pluginGroup>org.apache.maven.plugins</pluginGroup>
<pluginGroup>org.codehaus.mojo</pluginGroup>
</pluginGroups>
</settings>

@ -0,0 +1,28 @@
version: "3"
networks:
gitea:
external: false
services:
server:
image: gitea/gitea:1.18.1
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
- GITEA__database__DB_TYPE=mysql
- GITEA__database__HOST=rm-wz9yxho9eg3x33hpono.mysql.rds.aliyuncs.com:3306
- GITEA__database__NAME=gitea
- GITEA__database__USER=gitea
- GITEA__database__PASSWD=ALVPD3iumzjBX4hiMjo3AWYN
restart: always
networks:
- gitea
volumes:
- ./gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3000:3000"
- "222:22"

@ -0,0 +1,5 @@
# [gitea &amp; drone webhook推送不成功](https://www.cnblogs.com/shiningrise/p/16944140.html)
https://www.cnblogs.com/shiningrise/p/16944140.html

@ -0,0 +1,13 @@
version: '3'
services:
pgweb:
image: dpage/pgadmin4
container_name: pgadmin
ports:
- "18081:80"
environment:
PGADMIN_DEFAULT_EMAIL: "zeng32@qq.com"
PGADMIN_DEFAULT_PASSWORD: "123456"
volumes:
- ./pgadmin:/var/lib/pgadmin
restart: always

@ -0,0 +1,12 @@
version: "3.2"
services:
portainer:
image: portainer/portainer-ce
container_name: portainer
restart: always
ports:
- "8008:8000"
- "9009:9000"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./data:/data

@ -0,0 +1,19 @@
version: '3.1'
services:
postgresql:
image: postgres:14
container_name: postgresql
restart: always
environment:
POSTGRES_PASSWORD: root
ports:
- 5432:5432
volumes:
- ./data:/var/lib/postgresql/data
adminer:
image: adminer
container_name: postgresql-adminer
restart: always
ports:
- 8888:8080

@ -0,0 +1,38 @@
version: '3'
services:
node_exporter:
image: prom/node-exporter:latest
container_name: node_exporter
command:
- '--path.rootfs=/host'
pid: host
user: root
restart: unless-stopped
environment:
- TZ=Asia/Shanghai
ports:
- 9100:9100
volumes:
- '/:/host:ro,rslave'
prometheus:
image: prom/prometheus:latest
restart: always
container_name: prometheus
hostname: prometheus
environment:
- TZ=Asia/Shanghai
ports:
- 9090:9090
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/usr/share/prometheus/console_libraries'
- '--web.console.templates=/usr/share/prometheus/consoles'
- '--storage.tsdb.retention.time=7d'
- '--web.external-url=prometheus'
volumes:
- ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
- ./promdata:/prometheus

@ -0,0 +1,27 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: 'node-exporter'
static_configs:
- targets: ['192.168.31.249:9100']
labels:
host: myhost01
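A quick check that the scrape target above is reachable; the Prometheus health path may carry a prefix depending on the --web.external-url flag in the compose file:
```
curl -s http://192.168.31.249:9100/metrics | grep -m 1 node_cpu_seconds_total
curl -s http://192.168.31.249:9090/-/healthy
```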

@ -0,0 +1,11 @@
# Detailed walkthrough: building a Prometheus + Grafana monitoring stack with docker-compose (hands-on)
https://blog.csdn.net/qq_36595568/article/details/124285925
# Grafana dashboard ID 1860
https://grafana.com/grafana/dashboards/1860-node-exporter-full/

@ -0,0 +1,423 @@
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Redis does not run as a daemon; use 'yes' to enable daemon mode.
# When daemonized, Redis writes its pid to a pidfile at /var/run/redis.pid.
daemonize no
# When running as a daemon, Redis writes the pid to /var/run/redis.pid by default; it can be changed with pidfile.
pidfile /var/run/redis.pid
# The port Redis listens on; the default is 6379.
# If port 0 is specified, Redis will not listen on a TCP socket.
port 6379
# The host address to bind to.
# You can bind a single interface; if no bind is given, all interfaces will accept incoming connections.
bind 0.0.0.0
# Specify the path for the unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 755
# Close the connection after a client is idle for N seconds (0 to disable).
timeout 0
# Log level; Redis supports four levels: debug, verbose, notice, warning. The default is verbose.
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel verbose
# Log file; stdout by default. If Redis runs as a daemon and stdout is configured, logs are sent to /dev/null.
logfile /logs/redis.log
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Number of databases; the default database is 0. Use select <dbid> on a connection to choose a database.
# dbid ranges from 0 to databases-1.
databases 16
################################ SNAPSHOTTING #################################
# Save the dataset to disk when, within the given time window, at least the given number of write operations occurred; multiple conditions can be combined.
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# The data will be saved when any of the following conditions are met:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
# Note: you can comment out all the "save" lines to disable saving entirely.
save 900 1
save 300 10
save 60 10000
# Compress string objects when dumping the database; the default is yes. Redis uses LZF compression. Disabling it saves CPU time but produces much larger dump files.
rdbcompression yes
# The filename of the local database dump; the default is dump.rdb.
dbfilename dump.rdb
# The working directory.
# The directory where the local database is written; the filename is set by the dbfilename option above.
#
# Also the Append Only File will be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir ./
################################# REPLICATION #################################
# Master-slave replication. Use slaveof to make this Redis instance a copy of another Redis server. Note that this configuration only applies to the local slave.
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
# When this host is a slave, set the IP address and port of the master; Redis will automatically sync data from the master on startup.
# slaveof <masterip> <masterport>
# If the master is password protected, this is the password the slave uses to connect to the master.
# (the password itself is set with the "requirepass" option below)
# masterauth <master-password>
# When a slave lost the connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets a timeout for both Bulk transfer I/O timeout and
# master data or ping response timeout. The default value is 60 seconds.
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
################################## SECURITY ###################################
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
# Set the Redis connection password. If set, clients must authenticate with auth <password> when connecting. Disabled by default.
requirepass redis
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use
# tools but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
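# Illustrative sketch (assumption: clients in this deployment do not need
# FLUSHALL/FLUSHDB): the same renaming trick can disable them outright.
# Kept commented out here.
#
# rename-command FLUSHALL ""
# rename-command FLUSHDB ""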
################################### LIMITS ####################################
# Maximum number of simultaneous client connections. Unlimited by default: the number of connections Redis can accept is bounded by the maximum number of file descriptors the Redis process may open.
# Setting maxclients to 0 means no limit. Once the limit is reached, Redis closes new connections and returns a 'max number of clients reached' error to the client.
# maxclients 128
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys with an
# EXPIRE set. It will try to start freeing keys that are going to expire
# in little time and preserve keys with a longer time to live.
# Redis will also try to remove objects from free lists if possible.
#
# If all this fails, Redis will start to reply with errors to commands
# that will use more memory, like SET, LPUSH, and so on, and will continue
# to reply to most read-only commands like GET.
#
# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
# 'state' server or cache, not as a real DB. When Redis is used as a real
# database the memory usage will grow over the weeks, it will be obvious if
# it is going to use too much memory in the long run, and you'll have the time
# to upgrade. With maxmemory after the limit is reached you'll start to get
# errors for write operations, and this may even lead to DB inconsistency.
# Maximum amount of memory Redis may use. Redis loads data into memory at startup; once the limit is reached it first tries to evict keys that have expired or are about to expire.
# If the limit is still exceeded after that, write operations fail while read operations continue to work.
# With Redis's VM mechanism, keys stay in memory while values are stored in the swap area.
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among the following behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with all the kind of policies, Redis will return an error on write
# operations, when there are not suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can select as well the sample
# size to check. For instance for default Redis will check three keys and
# pick the one that was used less recently, you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
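# Illustrative sketch (assumption: this instance would be used as a pure
# cache rather than a primary store): the directives above could be combined
# roughly as follows; the 256 MB value is only an example.
#
# maxmemory 268435456
# maxmemory-policy allkeys-lru
# maxmemory-samples 5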
############################## APPEND ONLY MODE ###############################
#
# Note that you can have both the async dumps and the append only file if you
# like (you have to comment the "save" statements above to disable the dumps).
# Still if append only mode is enabled Redis will load the data from the
# log file at startup ignoring the dump.rdb file.
# Whether to log every write operation. By default Redis writes data to disk asynchronously; if this is disabled, a power failure may lose data written during a short window,
# because Redis only syncs the data file according to the save conditions above, so some data exists only in memory for a while. The default is no.
# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
# log file in background when it gets too big.
appendonly yes
# Name of the append-only file. The default is appendonly.aof.
# appendfilename appendonly.aof
# The fsync() call tells the Operating System to actually write data on disk
# instead to wait for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
# When to fsync the append-only log. Three values are available:
# no: let the operating system flush the output buffer to disk (fast)
# always: call fsync() after every write operation (slow, safest)
# everysec: fsync once per second (a good compromise, the default)
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving the durability of Redis is
# the same as "appendfsync none", which in practical terms means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file, implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (or if no rewrite happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 1024
################################ VIRTUAL MEMORY ###############################
### WARNING! Virtual Memory is deprecated in Redis 2.4
### The use of Virtual Memory is strongly discouraged.
# Virtual Memory allows Redis to work with datasets bigger than the actual
# amount of RAM needed to hold the whole dataset in memory.
# In order to do so, frequently used keys are kept in memory while the other keys
# are swapped into a swap file, similarly to what operating systems do
# with memory pages.
# Whether to enable the virtual memory mechanism. The default is no.
# VM stores data in pages: rarely accessed pages (cold data) are swapped to disk, while frequently accessed pages are swapped back into memory automatically.
# Set vm-enabled to yes and tune the following VM parameters as needed to turn VM on.
# vm-enabled no
# vm-enabled yes
# This is the path of the Redis swap file. As you can guess, swap files
# can't be shared by different Redis instances, so make sure to use a swap
# file for every redis process you are running. Redis will complain if the
# swap file is already in use.
#
# The Redis swap file is best stored on an SSD.
# Path of the virtual memory file. The default is /tmp/redis.swap; it cannot be shared between Redis instances.
# *** WARNING *** if you are using a shared hosting the default of putting
# the swap file under /tmp is not secure. Create a dir with access granted
# only to Redis user and configure Redis to create the swap file there.
# vm-swap-file /tmp/redis.swap
# With vm-max-memory 0 the system will swap everything it can. Not a good
# default, just specify the max amount of RAM you can in bytes, but it's
# better to leave some margin. For instance specify an amount of RAM
# that's more or less between 60 and 80% of your free RAM.
# All data above vm-max-memory is stored in virtual memory. No matter how small vm-max-memory is, all index data (the keys) stays in memory;
# in other words, when vm-max-memory is 0, all values are actually stored on disk. The default is 0.
# vm-max-memory 0
# The Redis swap file is split into many pages. An object may span multiple pages, but a page cannot be shared by multiple objects, so vm-page-size should be sized to the data you store.
# If you store many small objects, a page size of 32 or 64 bytes is recommended; for very large objects use a larger page size. If unsure, use the default.
# vm-page-size 32
# Number of pages in the swap file. The page table (a bitmap marking pages as free or in use) is kept in memory, consuming 1 byte of RAM for every 8 pages on disk.
# Total swap capacity is vm-page-size * vm-pages.
#
# With the default of 32-bytes memory pages and 134217728 pages Redis will
# use a 4 GB swap file, that will use 16 MB of RAM for the page table.
#
# It's better to use the smallest acceptable value for your application,
# but the default is large in order to work in most conditions.
# vm-pages 134217728
# Max number of VM I/O threads running at the same time.
# These threads are used to read/write data from/to the swap file; since they
# also encode and decode objects from disk to memory or the reverse, a bigger
# number of threads can help with big objects even if they can't help with
# I/O itself as the physical device may not be able to cope with many
# reads/writes operations at the same time.
# Number of I/O threads used to access the swap file. It is best not to exceed the number of CPU cores; if set to 0, all swap file operations are serialized, which may cause long delays. The default is 4.
# vm-max-threads 4
############################### ADVANCED CONFIG ###############################
# Hashes are encoded in a special way (much more memory efficient) when they
# have at max a given number of elements, and the biggest element does not
# exceed a given threshold. You can configure these limits with the following
# configuration directives.
# A special, memory-efficient hash encoding is used until the number of entries or the size of the largest element exceeds the given thresholds.
# hash-max-zipmap-entries 512
# hash-max-zipmap-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
# Whether to enable active rehashing. Enabled by default.
activerehashing yes
################################## INCLUDES ###################################
# Include other configuration files. This lets multiple Redis instances on the same host share a common configuration file while each instance keeps its own specific settings.
# include /path/to/local.conf
# include /path/to/other.conf

@ -0,0 +1,14 @@
# https://zhuanlan.zhihu.com/p/384332104
version: '3'
services:
redis:
image: redis:4.0.1
restart: always
container_name: redis-401
volumes:
- ./data:/data
- ./conf/redis.conf:/usr/local/etc/redis/redis.conf
- ./logs:/logs
command: redis-server /usr/local/etc/redis/redis.conf
ports:
- '6379:6379'
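A quick sanity check that the container picked up the mounted redis.conf (a sketch; it assumes the stack was started with `docker-compose -p redis up -d` as in the start script, and uses the `requirepass redis` value from the config above):

```bash
# Ping the containerized Redis using the password from the mounted config.
docker exec -it redis-401 redis-cli -a redis ping            # expect: PONG
# Confirm a directive from the mounted redis.conf took effect.
docker exec -it redis-401 redis-cli -a redis config get appendonly
```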

@ -0,0 +1,13 @@
#!/bin/bash
set -e
tm=`date +%Y%m%d-%H%M%S`
echo "====================================================="
echo "Starting redis."
echo "Starting <${tm}>."
echo "====================================================="
echo "Please ensure the config files right."
echo "====================================================="
docker-compose -p redis up -d

@ -0,0 +1,13 @@
#!/bin/bash
set -e
tm=`date +%Y%m%d-%H%M%S`
echo "====================================================="
echo "Starting redis."
echo "Starting <${tm}>."
echo "====================================================="
echo "Please ensure the config files right."
echo "====================================================="
docker-compose -p redis down

@ -0,0 +1,35 @@
# Reference
[Deploying SonarQube with docker-compose - juejin.cn](https://juejin.cn/post/7027362354547392526)
# Issues encountered at startup
## Permission issue
chmod -R 777 .
## max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
[max virtual memory areas vm.max_map_count is too low, increase to at least 262144 - CSDN](https://blog.csdn.net/qq_43655835/article/details/104633359)
### Cause
**SonarQube runs Elasticsearch under the hood**; see the fix sketched below.
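A minimal fix on the Docker host (a sketch; assumes a Linux host with sysctl, and uses the 262144 value from the error message):

```bash
# Raise the limit for the current boot.
sudo sysctl -w vm.max_map_count=262144
# Persist it across reboots.
echo 'vm.max_map_count=262144' | sudo tee -a /etc/sysctl.conf
sudo sysctl -p
```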
# Default username and password
admin
admin
# New password
admin123
# GitLab integration
[SonarQube and GitLab integration - CSDN](https://blog.csdn.net/suo082407128/article/details/119328421)
# Installing the Chinese language pack
[sonar + Jenkins: building an automated code quality analysis platform - Tencent Cloud](https://cloud.tencent.com/developer/article/1698814#:~:text=%E7%99%BB%E9%99%86sonar%E5%90%8E%EF%BC%8C%E5%9C%A8Administration%E2%80%93%3ESystem%E2%80%93%3EUpdate,Center%E2%80%93%3EAvailable%E4%B8%AD%E6%90%9C%E7%B4%A2Chinese%E5%B0%B1%E5%8F%AF%E4%BB%A5%E7%9C%8B%E5%88%B0%E4%B8%AD%E6%96%87%E6%8F%92%E4%BB%B6%E5%8C%85%EF%BC%8C%E7%82%B9%E5%87%BBinstall%E5%8D%B3%E5%8F%AF%E5%AE%89%E8%A3%85%E3%80%82)

@ -0,0 +1,41 @@
# https://juejin.cn/post/7027362354547392526
version: '3'
services:
# postgres:
# image: postgres:12
# restart: always
# container_name: postgres
# ports:
# - 5432:5432
# volumes:
# - /opt/postgres/postgresql/:/var/lib/postgresql
# - /opt/postgres/data/:/var/lib/postgresql/data
# environment:
# TZ: Asia/Shanghai
# POSTGRES_USER: sonar
# POSTGRES_PASSWORD: sonar123
# POSTGRES_DB: sonar
# networks:
# - sonar-network
sonar:
image: sonarqube:9.8-community
restart: always
container_name: sonar
# depends_on:
# - postgres
volumes:
- ./extensions:/opt/sonarqube/extensions
- ./logs:/opt/sonarqube/logs
- ./data:/opt/sonarqube/data
- ./conf:/opt/sonarqube/conf
ports:
- 9000:9000
environment:
SONARQUBE_JDBC_USERNAME: postgres
SONARQUBE_JDBC_PASSWORD: root
SONARQUBE_JDBC_URL: jdbc:postgresql://192.168.31.249:5432/postgres
networks:
- sonar-network
networks:
sonar-network:
driver: bridge
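A rough readiness check after `docker-compose up -d` (a sketch; assumes the 9000 port mapping above and that the API is reachable from the host):

```bash
# SonarQube reports STARTING while Elasticsearch boots, then UP when ready.
curl -s http://localhost:9000/api/system/status
```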

@ -0,0 +1,18 @@
version: '3'
services:
telegraf:
image: telegraf
container_name: telegraf
restart: always
    volumes:
      - ./telegraf:/etc/telegraf
      - /sys:/rootfs/sys:ro
      - /proc:/rootfs/proc:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /etc/localtime:/etc/localtime:ro
    environment:
      - 'HOST_PROC=/rootfs/proc'
      - 'HOST_SYS=/rootfs/sys'
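The compose file mounts `./telegraf` as `/etc/telegraf`, so a `telegraf.conf` needs to exist there. One way to bootstrap it (a sketch, using the same `telegraf` image) is to let Telegraf print its sample configuration:

```bash
mkdir -p telegraf
# Write the image's default sample configuration into the mounted directory.
docker run --rm telegraf telegraf config > telegraf/telegraf.conf
```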

@ -0,0 +1,11 @@
version: '3.3'
services:
uptime-kuma:
image: louislam/uptime-kuma:latest
container_name: uptime-kuma
restart: always
volumes:
- ./uptime-kuma:/app/data
ports:
- 3001:3001
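The log dump further below shows recurring SQLITE_BUSY errors around 03:14; a quick way to check whether the running container is still hitting them (a sketch, using the container name from the compose file above):

```bash
# Count "database is locked" errors in the container logs.
docker logs uptime-kuma 2>&1 | grep -c SQLITE_BUSY
```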

@ -0,0 +1,5 @@
Username and password
admin
rwc@ync0gph1JRJ@bhp

@ -0,0 +1,204 @@
[2022-08-24 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 9, '200 - OK', 6, 1, '2022-08-24 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-08-27 03:14:02] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 1, '200 - ', 182, 1, '2022-08-27 03:14:00') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-08-29 03:14:02] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 4, '200 - ', 199, 1, '2022-08-29 03:14:00') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-08-31 03:14:04] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (61, false, 8, '200 - OK', 253, 1, '2022-08-31 03:14:02') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-09-02 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 3, '200 - ', 167, 1, '2022-09-02 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-09-06 03:14:02] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 7, '200 - OK', 6, 1, '2022-09-06 03:14:00') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-09-16 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 12, '200 - OK', 6, 1, '2022-09-16 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-10-29 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 2, '200 - ', 8, 1, '2022-10-29 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-11-05 03:14:05] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `status`, `time`) values (60, false, 5, 'certificate has expired', 0, '2022-11-05 03:14:04') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-11-12 03:14:06] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `status`, `time`) values (60, false, 5, 'certificate has expired', 0, '2022-11-12 03:14:05') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-11-13 03:14:05] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 11, '200 - OK', 10, 1, '2022-11-13 03:14:03') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-11-14 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 1, '200 - ', 158, 1, '2022-11-14 03:14:00') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-11-17 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 7, '200 - OK', 7, 1, '2022-11-17 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-11-19 03:14:07] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `status`, `time`) values (60, false, 5, 'certificate has expired', 0, '2022-11-19 03:14:05') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-11-20 03:14:02] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 7, '200 - OK', 6, 1, '2022-11-20 03:14:00') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-11-21 03:14:02] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 11, '200 - OK', 9, 1, '2022-11-21 03:14:00') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-11-30 03:14:02] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (64, false, 8, '200 - OK', 4296, 1, '2022-11-30 03:13:57') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-03 03:14:04] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 2, '200 - ', 9, 1, '2022-12-03 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-05 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (61, false, 13, '200 - OK', 6, 1, '2022-12-05 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-05 03:14:05] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 11, '200 - OK', 7, 1, '2022-12-05 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-06 03:14:04] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 1, '200 - ', 150, 1, '2022-12-06 03:14:02') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-07 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 14, '200 - OK', 7, 1, '2022-12-07 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-08 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 9, '200 - OK', 13, 1, '2022-12-08 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-10 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 14, '200 - OK', 9, 1, '2022-12-10 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-10 03:14:05] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (65, false, 8, '200 - OK', 4460, 1, '2022-12-10 03:13:58') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-10 03:14:06] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 7, '200 - OK', 9, 1, '2022-12-10 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-13 03:14:01] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `status`, `time`) values (60, false, 5, 'certificate has expired', 0, '2022-12-13 03:14:00') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-15 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 13, '200 - OK', 14, 1, '2022-12-15 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-19 03:14:02] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 14, '200 - OK', 6, 1, '2022-12-19 03:14:00') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-19 03:14:04] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 7, '200 - OK', 5, 1, '2022-12-19 03:14:00') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-19 03:14:05] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (64, false, 8, '200 - OK', 3822, 1, '2022-12-19 03:14:00') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-20 03:14:02] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `status`, `time`) values (60, false, 5, 'certificate has expired', 0, '2022-12-20 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-27 03:14:04] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `status`, `time`) values (60, false, 5, 'certificate has expired', 0, '2022-12-27 03:14:02') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-27 03:14:05] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (61, false, 3, '200 - ', 185, 1, '2022-12-27 03:14:02') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-28 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 9, '200 - OK', 7, 1, '2022-12-28 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2022-12-29 03:14:02] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 8, '200 - OK', 11, 1, '2022-12-29 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-01 03:14:02] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 8, '200 - OK', 10, 1, '2023-01-01 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-01 03:14:06] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 7, '200 - OK', 6, 1, '2023-01-01 03:14:04') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-03 03:14:04] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `status`, `time`) values (60, false, 5, 'certificate has expired', 0, '2023-01-03 03:14:03') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-04 03:14:03] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 8, '200 - OK', 9, 1, '2023-01-04 03:14:02') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-07 03:14:02] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 1, '200 - ', 149, 1, '2023-01-07 03:14:00') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-07 03:14:05] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 9, '200 - OK', 6, 1, '2023-01-07 03:14:03') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-08 03:14:04] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 14, '200 - OK', 6, 1, '2023-01-08 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-08 03:14:05] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 10, '200 - OK', 6, 1, '2023-01-08 03:14:01') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-08 03:14:08] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 6, '200 - OK', 6, 1, '2023-01-08 03:14:02') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-08 03:14:09] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 11, '200 - OK', 9, 1, '2023-01-08 03:14:03') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-10 03:14:06] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 5, '200 - OK', 7, 1, '2023-01-10 03:14:04') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-10 03:14:10] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 8, '200 - OK', 10, 1, '2023-01-10 03:14:09') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-11 03:14:04] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 6, '200 - OK', 5, 1, '2023-01-11 03:14:02') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-11 03:14:06] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (60, false, 4, '200 - ', 185, 1, '2023-01-11 03:14:03') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}
[2023-01-11 03:14:08] [Error: insert into `heartbeat` (`duration`, `important`, `monitor_id`, `msg`, `ping`, `status`, `time`) values (61, false, 1, '200 - ', 148, 1, '2023-01-11 03:14:05') - SQLITE_BUSY: database is locked] {
errno: 5,
code: 'SQLITE_BUSY'
}