Compare commits

..

10 Commits

Author SHA1 Message Date
1708-huayu 43b2129767 backup 2024-09-22 11:58:43 +08:00
1708-huayu fc99a64d28 backup 2023-12-02 13:46:34 +08:00
1708-huayu 35d57e4c7c backup 2023-11-07 15:38:13 +08:00
1708-huayu bec48cac9b backup 2023-10-16 21:25:31 +08:00
1708-huayu ac8f7f10b9 backup 2023-10-16 21:25:31 +08:00
1708-huayu abc68d51ba backup 2023-10-16 21:25:31 +08:00
HuaYu dcc9b1bf52 backup 2023-10-16 18:40:59 +08:00
HuaYu 27358f3a8a backup 2023-09-27 22:40:01 +08:00
HuaYu b19fecd173 backup 2023-09-26 21:55:59 +08:00
HuaYu 5f922e5a58 fix: add description 2023-09-19 22:19:10 +08:00
57 changed files with 1457 additions and 0 deletions

4
.gitignore vendored Normal file

@ -0,0 +1,4 @@
# Default ignored files
/shelf/
/.idea/workspace.xml
.idea

34
df/磁盘处理记录.md Normal file

@ -0,0 +1,34 @@
Accidentally wiped the boot disk and still want to read the data that was on it.
Basic commands:
```shell
# check disk usage
df
# disk management
fdisk --help
# mount the disk
mount /dev/sda2 old
# mount: /home/shixiaohua/old: mount(2) system call failed: Cannot allocate memory.
# trace the failing system call
strace mount /dev/sda2 old
```
Mount the disk into the system and add it to the config file so it does not have to be mounted by hand every time.
Check the mounts:
```sh
# list disks
df -h
# edit the mount configuration
vim /etc/fstab
/dev/sda3 /media/shixiaohua/a44c8a7c-5b7a-4e2a-8810-5af0561c94ac ext4 defaults 0 1
# show mounted filesystems
mount
# show disk partitions
lsblk
```
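A minimal sketch of making that fstab entry robust, assuming the /dev/sda3 partition from above; `blkid` reports the UUID so the entry survives device renames (the UUID and mount point below are placeholders):
```sh
# find the UUID of the partition to mount
blkid /dev/sda3
# append a UUID-based entry to /etc/fstab (UUID and mount point are examples)
echo 'UUID=<uuid-from-blkid> /media/shixiaohua/data ext4 defaults 0 2' >> /etc/fstab
# apply everything in fstab now; errors surface here instead of at boot
mount -a
```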

76
df/项目启动.md Normal file

@ -0,0 +1,76 @@
# Project startup
## Start zookeeper
```shell
## start
zkServer.sh start
## check status
zkServer.sh status
```
## Start kafka
```shell
## start
./kafka-server-start.sh -daemon ../config/server.properties
## create a topic
./kafka-topics.sh --zookeeper service:2181,service-node1:2181,service-node2:2181/bigdata/kafka --create --topic "t_traffic" --partitions 3 --replication-factor 2
```
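To sanity-check the broker after creating the topic, something like the following helps (a sketch; the --zookeeper string is the one used above, and the broker address `service:9092` is an assumption):
```shell
# list topics registered in zookeeper
./kafka-topics.sh --zookeeper service:2181,service-node1:2181,service-node2:2181/bigdata/kafka --list
# show partition/replica placement for the new topic
./kafka-topics.sh --zookeeper service:2181,service-node1:2181,service-node2:2181/bigdata/kafka --describe --topic "t_traffic"
# read from the beginning to confirm data is flowing (broker address assumed)
./kafka-console-consumer.sh --bootstrap-server service:9092 --topic "t_traffic" --from-beginning
```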
## Start flume
```shell
flume-ng agent -n a -f ./to_kafka.conf -Dflume.root.logger=INFO,console
```
## Start hadoop
```shell
## start all nodes
start-all.sh
## leave safe mode
hdfs dfsadmin -safemode leave
```
## Start yarn
```shell
yarn-daemon.sh start resourcemanager
```
## Start hive
```shell
## start the metastore
hive --service metastore &
## start the client
hive
```
## canal
Start
```sh
cd /usr/local/capsoftware/canal/deployer/bin
sh startup.sh
```
## ES
Start
```sh
cd /usr/local/capsoftware/es/elasticsearch-7.12.0/bin
su es
./elasticsearch
# check it is up: http://localhost:9200/
# elasticsearch-head plugin
cd /usr/local/capsoftware/es/elasticsearch-7.12.0/elasticsearch-head
npm run start
```
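A quick way to confirm the node is healthy once it answers on 9200 (a sketch; the URL comes from the note above):
```sh
# cluster health should report green or yellow for a single node
curl http://localhost:9200/_cluster/health?pretty
# list indices and their state
curl http://localhost:9200/_cat/indices?v
```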

211
dns/CoreDNS.md Normal file

@ -0,0 +1,211 @@
## Configuration
```shell
cat > coredns.yaml << "EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.8.4
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.96.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
EOF
```
## Start
```shell
kubectl apply -f coredns.yaml
```
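A quick check that everything came up as declared above (a sketch; names match the manifest):
```shell
# CoreDNS pods should reach Running under the kube-dns label
kubectl -n kube-system get pods -l k8s-app=kube-dns
# the Service should hold the fixed clusterIP 10.96.0.2
kubectl -n kube-system get svc kube-dns
# resolve an in-cluster name through the new DNS service
dig -t a kubernetes.default.svc.cluster.local @10.96.0.2
```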

42
dns/CoreDNS绑定bind.md Normal file

@ -0,0 +1,42 @@
```sh
# find the coredns pod
[root@hy-node3 ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-7cc8dd57d9-jhvnj 1/1 Running 4 7d21h
calico-node-lm24q 1/1 Running 9 20d
calico-node-p2pn4 1/1 Running 5 20d
coredns-675db8b7cc-s6z7g 1/1 Running 1 7d21h
snapshot-controller-0 1/1 Running 0 7d6h
# find the matching service and use its service IP
[root@hy-node3 ~]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-controller-manager-svc ClusterIP None <none> 10257/TCP 13d
kube-dns ClusterIP 10.96.0.2 <none> 53/UDP,53/TCP,9153/TCP 20d
kube-scheduler-svc ClusterIP None <none> 10259/TCP 13d
kubelet ClusterIP None <none> 10250/TCP,10255/TCP,4194/TCP 13d
# pick an IP and check whether DNS can resolve
[root@hy-node3 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 21d
nginx-service-nodeport NodePort 10.96.231.91 <none> 80:30001/TCP 20d
# resolution test
dig -t a www.baidu.com @10.96.0.2
[root@hy-node3 ~]# cat /etc/resolv.conf
# Generated by NetworkManager
nameserver 192.168.1.120
nameserver 223.6.6.6
nameserver 223.5.5.5
# edit the coredns config file
kubectl edit configmap coredns -n kube-system
```
![Edit the core-dns config file](img/QQ截图20230927214654.png)
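What that edit typically adds is a forward block for the private zone, so CoreDNS hands hy.com queries to the bind server; a minimal sketch, assuming the bind server is the 192.168.1.120 nameserver seen in /etc/resolv.conf above:
```sh
# stanza added inside the coredns ConfigMap's Corefile (zone name and IP are assumptions):
#   hy.com:53 {
#       errors
#       cache 30
#       forward . 192.168.1.120
#   }
# confirm the stanza landed in the live ConfigMap
kubectl get configmap coredns -n kube-system -o yaml | grep -A5 'hy.com:53'
```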
```sh
# check that a rolling restart can be performed
[root@hy-node3 ~]# kubectl get deployment.apps -n kube-system
NAME READY UP-TO-DATE AVAILABLE AGE
calico-kube-controllers 1/1 1 1 20d
coredns 1/1 1 1 20d
[root@hy-node3 ~]# kubectl rollout restart deployment coredns -n kube-system
deployment.apps/coredns restarted
```

24
dns/bind.md Normal file

@ -0,0 +1,24 @@
```sh
# install bind and edit the main config plus the zone declarations
yum -y install bind
vi /etc/named.conf
vi /etc/named.rfc1912.zones
# create the zone file from the template, preserving owner/permissions
cd /var/named/
ll
cp -p named.localhost hy.com.zone
ll
vi hy.com.zone
# enable and start the service
systemctl enable named
systemctl restart named
systemctl status named.service
# adjust the zone and its declaration, then restart again
vi hy.com.zone
vi /etc/named.rfc1912.zones
vi hy.com.zone
systemctl restart named
```
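Before trusting the zone, the bind tooling can validate it and a local query can prove resolution (a sketch; the record name is a placeholder for whatever hy.com.zone defines):
```sh
# check the config parses and the zone file is well formed
named-checkconf /etc/named.conf
named-checkzone hy.com /var/named/hy.com.zone
# query the local bind server for a record from the zone
dig -t a node1.hy.com @127.0.0.1
```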

30
docker/docker-push.md Normal file

@ -0,0 +1,30 @@
# Pushing images over http
```shell
# edit the docker daemon config
vi /etc/docker/daemon.json
{
"insecure-registries": ["hy-node4:80"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
# if it still fails, run a plain registry instead
docker run -d -p 5000:5000 --restart=always --name registry registry:2
```
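After changing /etc/docker/daemon.json the daemon has to be restarted before the insecure registry is honoured; a quick sketch:
```shell
# restart the docker daemon so daemon.json takes effect
systemctl daemon-reload
systemctl restart docker
# the registry should now be listed under "Insecure Registries"
docker info | grep -A3 'Insecure Registries'
```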
```shell
# build the image
docker buildx build . --tag 172.21.13.108:80/platform-v3/jeecg-boot-system:3.1.0923-mdm
# log in to the registry
root@shixiaohua:/run/containerd# docker login http://172.21.13.108:80
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
# tag: docker tag imageId ip:port/name:version
docker tag 4d69ba36298c 172.21.13.108:80/platform-v3/jeecg-boot-system:3.1.0922-mdm
# push to the remote registry
docker push 172.21.13.108:80/platform-v3/jeecg-boot-system:3.1.0922-mdm
```

16
docker/指令记录.md Normal file

@ -0,0 +1,16 @@
```shell
# make the container always restart
docker update --restart=always f854fe55ed76
# list networks
docker network ls
# create the bridge network task-manager
docker network create --driver bridge task-manager
# connect the existing container 8a74f2c64cdc to the task-manager network
docker network connect task-manager 8a74f2c64cdc
# inspect the task-manager network
docker network inspect task-manager
# run a container attached to the task-manager network
docker run -d -p 8090:8090 --network task-manager --restart unless-stopped -v ./hosts:/etc/hosts --name task-manager-server task-manager-server
```
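Two quick checks that the container actually joined the bridge (a sketch; container and network names are the ones used above):
```shell
# show which networks the container is attached to and its IP on each
docker inspect -f '{{json .NetworkSettings.Networks}}' task-manager-server
# list the containers attached to the task-manager bridge
docker network inspect task-manager -f '{{range .Containers}}{{.Name}} {{end}}'
```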

11
ftp/ftp.md Normal file

@ -0,0 +1,11 @@
Config file location
cat /etc/vsftpd.conf
Log location
sudo cat /var/log/vsftpd.log
Restart the service
sudo systemctl restart vsftpd
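A small sketch for checking the service and the effective (non-comment) configuration after a restart:
```sh
# service state
sudo systemctl status vsftpd
# settings actually in force, with comments and blank lines stripped
grep -v '^#' /etc/vsftpd.conf | grep -v '^$'
```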

File diff suppressed because one or more lines are too long

60
harbor/Harbor.md Normal file

@ -0,0 +1,60 @@
Requires docker and docker-compose to be installed.
[https://goharbor.io/docs/2.9.0/install-config/](https://goharbor.io/docs/2.9.0/install-config/)
# docker-compose
```shell
# download
wget https://github.com/docker/compose/releases/download/v2.20.2/docker-compose-linux-x86_64
# make it executable
chmod +x docker-compose-linux-x86_64
# move it onto the PATH
mv docker-compose-linux-x86_64 /usr/bin/docker-compose
# check the version
docker-compose -v
```
# Install harbor
```shell
wget https://github.com/goharbor/harbor/releases/download/v2.8.3/harbor-offline-installer-v2.8.3.tgz
# edit the config file
harbor.yml
# set hostname
# comment out the https section
# generate the runtime config
./prepare
# install
./install.sh
```
# Start
https://goharbor.io/docs/2.9.0/install-config/run-installer-script/
```
# pwd
/software/harbor
# start
docker-compose up -d
# stop
docker-compose down -v
```
# Change the config and make it take effect again
## Reference
https://goharbor.io/docs/2.9.0/install-config/reconfigure-manage-lifecycle/
## Steps
```
# stop the stack
docker-compose down -v
# edit the config
vim harbor.yml
# regenerate the runtime config
./prepare
./prepare --with-trivy
# start again
docker-compose up -d
```
# Using it from k8s
## Option 1
In kubesphere, create a secret ("加密字典") holding the harbor credentials.
On the workload creation page you can point at the secret you configured.
Use the exact image name and tag, otherwise the image cannot be found.
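An equivalent way to provide the pull credential from the command line (a sketch; secret name, registry host and password are placeholders):
```shell
# create an image-pull secret for the harbor registry
kubectl create secret docker-registry harbor-login \
  --docker-server=harbor.example.com \
  --docker-username=admin \
  --docker-password='<harbor-password>' \
  -n default
# then reference it from the workload via spec.template.spec.imagePullSecrets (name: harbor-login)
```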

5
k8s/hpa.md Normal file

@ -0,0 +1,5 @@
HorizontalPodAutoscaler
Dynamic scaling based on CPU and memory.
Install the add-on (a sketch follows below):
1. Find the metrics-server application on GitHub.
2. In its yml, adjust the image, the authentication/port arguments and the volume.
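Once metrics-server reports metrics, an HPA can be created imperatively; a sketch with a hypothetical deployment name and thresholds:
```shell
# verify metrics-server is answering
kubectl top nodes
# keep the (hypothetical) deployment between 1 and 5 replicas, targeting 80% CPU
kubectl autoscale deployment my-app --cpu-percent=80 --min=1 --max=5
kubectl get hpa
```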

22
k8s/kubernetes.md Normal file

@ -0,0 +1,22 @@
### Main features
1. Volumes: data sharing between containers in a pod and across pods
2. Resource monitoring
3. Elastic scaling
4. Service discovery
5. Load balancing
6. Rolling updates
7. Service orchestration
8. Application health checks
9. Authentication and authorization
10. Replication of application instances
### Core objects
1. Pod
2. Volume
3. Service
4. Deployment
5. DaemonSet
6. StatefulSet
7. Job

6
k8s/kubesphere/记录.md Normal file

@ -0,0 +1,6 @@
URL
http://hy-node3:30880/clusters/default/base-info
admin
Sxh123456!
Check network ports
ss -anput |grep port

24
k8s/port.md Normal file

@ -0,0 +1,24 @@
kubectl explain service.spec.ports
NodePort
ClusterIP
headless
Service
Path developers use for access
![](img/微信截图_20230922225230.png)
How users reach a service instance
![微信截图_20230922224028.png](img/微信截图_20230922224028.png)
Common service-exposure add-ons
- nginx-ingress: beta at the time, heavyweight
- traefik: reverse proxy, load balancing, web UI
traefik-ds.yml
traefik-rbac.yml
traefik-ui.yml
Two ways to modify an ingress (see the sketch after this list):
1. Edit the yaml file
2. kubectl edit ingress nginx -n default
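A minimal sketch of the NodePort path mentioned above, matching the nginx-service-nodeport seen elsewhere in these notes (the deployment name is assumed):
```shell
# expose an existing nginx deployment on a node port
kubectl expose deployment nginx --type=NodePort --port=80 --name=nginx-service-nodeport
# see which high port (30000-32767) was allocated
kubectl get svc nginx-service-nodeport
# option 2 from the list above: edit the ingress in place
kubectl edit ingress nginx -n default
```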

98
k8s/pv.md Normal file

@ -0,0 +1,98 @@
PV
Using nfs
```
yum install -y nfs-utils
# check whether it is installed
cat /etc/exports
showmount -e 127.0.0.1
systemctl start rpcbind
systemctl enable rpcbind
systemctl status rpcbind
mkdir /data/nfs
mount -t nfs 10.0.0.245:/data/nfs /data/nfs
```
Edit pv.yml (a sketch of the pv/pvc yaml follows the screenshots below)
![pv.yml](img/pvQQ截图20231012222434.png)
pvc
![pvc](img/pvcQQ截图20231012223858.png)
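A minimal sketch of what an NFS-backed pv.yml/pvc.yml pair typically looks like, reusing the 10.0.0.245:/data/nfs export from above; names, namespace and sizes are assumptions chosen to match the deployment below:
```shell
cat > pv.yml << "EOF"
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 10.0.0.245
    path: /data/nfs
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc
  namespace: kube-test
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
EOF
kubectl apply -f pv.yml
```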
deployment
```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dp-pvc
  namespace: kube-test
  labels:
    app: dp-pvc
spec:
  selector:
    matchLabels:
      app: dp-pvc
  template:
    metadata:
      labels:
        app: dp-pvc
    spec:
      containers:
      - name: dp-pvc
        image: harbor.captain.com/captain/nginx:1.19.8-alpine
        ports:
        - containerPort: 80
        volumeMounts:
        - name: dp-pvc
          mountPath: /usr/share/nginx/html
      volumes:
      - name: dp-pvc
        persistentVolumeClaim:
          claimName: pvc
```
service
```
apiVersion: v1
kind: Service
metadata:
  name: dp-pvc
  namespace: kube-test
  labels:
    app: dp-pvc
spec:
  # type: ClusterIP
  selector:
    app: dp-pvc
  # clusterIP:
  ports:
  - name: dp-pvc
    protocol: TCP
    port: 80
    targetPort: 80
```
ingress
```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx
  namespace: kube-system
  labels:
    app: nginx
spec:
  rules:
  - host:
    http:
      paths:
      - path: /
        backend:
          serviceName:
          servicePort:
```

3
k8s/rbac.md Normal file

@ -0,0 +1,3 @@
Subjects: ServiceAccount
Roles: Role, ClusterRole (a cluster role is not namespaced)
Bindings: RoleBinding, ClusterRoleBinding
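A minimal sketch of wiring the three together imperatively (names and namespace are placeholders):
```shell
# a Role that can read pods in the default namespace
kubectl create role pod-reader --verb=get,list,watch --resource=pods -n default
# bind it to the default ServiceAccount of that namespace
kubectl create rolebinding pod-reader-binding --role=pod-reader --serviceaccount=default:default -n default
# check what that ServiceAccount may now do
kubectl auth can-i list pods -n default --as=system:serviceaccount:default:default
```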

4
k8s/statefulset.md Normal file

@ -0,0 +1,4 @@
sts
1. Ordered deployment
2. Stable identity: headless Service, no clusterIP

@ -0,0 +1,4 @@
### Graphical UI tools
1. kubesphere
2. Lens

30
k8s/有状态存储.md Normal file

@ -0,0 +1,30 @@
1. Extract configuration into ConfigMaps (config dictionaries) and Secrets
2. Persist data with PVCs
3. Use DNS to provide stable domain names
Persistent volumes - services and pods - application routes (access from outside the cluster)
Application routes
A Service comes with a DNS name
```shell
ping sv-mdm.mdm-9-22
ping sv-mdm.mdm-9-22.svc.cluster.local
```
A headless-service domain name is only resolvable inside k8s
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.2/deploy/static/provider/cloud/deploy.yaml
nslookup to check the domain name
kubectl get pods --namespace=ingress-nginx
statefulset
openelb, apisix
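A quick sketch of checking the in-cluster name from a throwaway pod (the pod name and image are placeholders; the service name comes from the ping example above):
```shell
# resolve the service's cluster-internal DNS name
kubectl run dns-test --rm -it --image=busybox:1.36 --restart=Never -- nslookup sv-mdm.mdm-9-22.svc.cluster.local
# ingress-nginx controller pods after the apply above
kubectl get pods --namespace=ingress-nginx
```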

22
k8s/配置分离.md Normal file

@ -0,0 +1,22 @@
# configmap
## From a config file
kubectl create configmap env --from-file= -n kube-ticai
![](img/cmQQ截图20231015211952.png)
## From environment variables
kubectl create configmap env --from-env-file= -n kube-ticai
envFrom: (see the sketch below)
# secret
## opaque: generic
kubectl create secret generic test --from-file=
kubectl get secret test-v1 -o jsonpath='{.data}'
## kubernetes.io/service-account-token: a k8s service account
## kubernetes.io/dockerconfigjson
![](img/secretgarbirQQ截图20231015221730.png)
## kubernetes.io/tls
## kubernetes.io/ssh-auth, kubernetes.io/basic-auth
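The dangling `envFrom:` above refers to injecting a whole ConfigMap as environment variables; a minimal sketch of that pod-spec fragment (pod name and image are placeholders, the ConfigMap and namespace are the ones used above):
```shell
cat > cm-env-demo.yml << "EOF"
apiVersion: v1
kind: Pod
metadata:
  name: cm-env-demo
  namespace: kube-ticai
spec:
  containers:
  - name: demo
    image: busybox:1.36
    command: ["sh", "-c", "env && sleep 3600"]
    envFrom:
    - configMapRef:
        name: env
EOF
kubectl apply -f cm-env-demo.yml
```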

10
maven/依赖查找.md Normal file

@ -0,0 +1,10 @@
Find the dependency tree
```sh
mvn dependency:tree -Dverbose -Dincludes=org.apache.commons:commons-parent
```

30
sources.list Normal file

@ -0,0 +1,30 @@
deb https://mirrors.ustc.edu.cn/ubuntu-releases/ jammy bionic main restricted universe multiverse
deb https://mirrors.ustc.edu.cn/ubuntu-releases/ jammy bionic-security main restricted universe multiverse
deb https://mirrors.ustc.edu.cn/ubuntu-releases/ jammy bionic-updates main restricted universe multiverse
deb https://mirrors.ustc.edu.cn/ubuntu-releases/ jammy bionic-proposed main restricted universe multiverse
deb https://mirrors.ustc.edu.cn/ubuntu-releases/ jammy bionic-backports main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu-releases/ jammy bionic main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu-releases/ jammy bionic-security main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu-releases/ jammy bionic-updates main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu-releases/ jammy bionic-proposed main restricted universe multiverse
deb-src https://mirrors.ustc.edu.cn/ubuntu-releases/ jammy bionic-backports main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ jammy bionic main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ jammy bionic-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ jammy bionic-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ jammy bionic-proposed main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ jammy bionic-backports main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ jammy bionic main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ jammy bionic-security main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ jammy bionic-updates main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ jammy bionic-proposed main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ jammy bionic-backports main restricted universe multiverse
deb http://cn.archive.ubuntu.com/ubuntu/ jammy xenial main restricted universe multiverse
deb http://cn.archive.ubuntu.com/ubuntu/ jammy xenial-security main restricted universe multiverse
deb http://cn.archive.ubuntu.com/ubuntu/ jammy xenial-updates main restricted universe multiverse
deb http://cn.archive.ubuntu.com/ubuntu/ jammy xenial-backports main restricted universe multiverse
deb-src http://cn.archive.ubuntu.com/ubuntu/ jammy xenial main restricted universe multiverse
deb-src http://cn.archive.ubuntu.com/ubuntu/ jammy xenial-security main restricted universe multiverse
deb-src http://cn.archive.ubuntu.com/ubuntu/ jammy xenial-updates main restricted universe multiverse
deb-src http://cn.archive.ubuntu.com/ubuntu/ jammy xenial-backports main restricted universe multiverse

@ -12,8 +12,14 @@ cat /etc/sysconfig/network
hostname hy-node1
# edit the file for a permanent change
vi /etc/hostname
hy-node6
# localhost.localdomain
```
```
# bind IPs
vi /etc/hosts
192.168.1.116 hy-node1 hy-node1.com
192.168.1.117 hy-node2
192.168.1.118 hy-node3

@ -0,0 +1,69 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: jeecg-boot-redis
  namespace: mdm-9-22
  labels:
    k8s.kuboard.cn/name: jeecg-boot-redis
  annotations:
    kubesphere.io/creator: admin
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s.kuboard.cn/name: jeecg-boot-redis
  template:
    metadata:
      creationTimestamp: null
      labels:
        k8s.kuboard.cn/name: jeecg-boot-redis
      annotations:
        cni.projectcalico.org/ipAddrs: '["10.244.10.88"]'
        kubectl.kubernetes.io/restartedAt: '2023-08-28T18:27:24+08:00'
        kubesphere.io/creator: admin
        kubesphere.io/restartedAt: '2023-09-25T05:30:36.407Z'
    spec:
      volumes:
        - name: vol-mdm
          persistentVolumeClaim:
            claimName: pvc-mdm
      containers:
        - name: jeecg-boot-redis
          image: 'redis:5.0'
          ports:
            - hostPort: 6380
              containerPort: 6379
              protocol: TCP
          resources: {}
          volumeMounts:
            - name: vol-mdm
              mountPath: /mnt
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext:
            capabilities:
              add:
                - SYS_ADMIN
            privileged: true
            runAsUser: 0
            runAsGroup: 0
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext:
        seLinuxOptions: {}
      imagePullSecrets:
        - name: login
      affinity: {}
      schedulerName: default-scheduler
      hostAliases:
        - ip: 10.244.10.66
          hostnames:
            - jeecg-boot-system-mdm
      dnsConfig: {}
  serviceName: jeecg-boot-redis
  podManagementPolicy: OrderedReady
  updateStrategy:
    type: RollingUpdate
  revisionHistoryLimit: 10

@ -0,0 +1,78 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: jeecg-boot-system-mdm
  namespace: mdm-9-22
  labels:
    k8s.kuboard.cn/name: jeecg-boot-system-mdm
  annotations:
    kubesphere.io/creator: admin
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s.kuboard.cn/name: jeecg-boot-system-mdm
  template:
    metadata:
      creationTimestamp: null
      labels:
        k8s.kuboard.cn/name: jeecg-boot-system-mdm
      annotations:
        cni.projectcalico.org/ipAddrs: '["10.244.10.66"]'
        kubectl.kubernetes.io/restartedAt: '2023-09-14T15:39:50+08:00'
        kubesphere.io/creator: admin
        kubesphere.io/restartedAt: '2023-09-25T07:53:06.004Z'
    spec:
      volumes:
        - name: vol-mdm
          persistentVolumeClaim:
            claimName: pvc-mdm
      containers:
        - name: jeecg-boot-system-mdm
          image: '172.21.13.108:80/platform-v3/jeecg-boot-system:3.1.06379-mdm'
          command:
            - /bin/sh
            - '-c'
            - >-
              ifconfig eth0 hw ether 76:66:a2:d8:66:0e;ln -s
              /mnt/jeecglic.properties ./config/jeecglic.properties;java
              -Djava.security.egd=file:/dev/./urandom -jar
              jeecg-boot-module-system-3.0.0.jar -Xms256m -Xmx2g
              --spring.profiles.active=dockerdev;tail -f /dev/null
          workingDir: /jeecg-boot
          ports:
            - name: sys
              hostPort: 8081
              containerPort: 8080
              protocol: TCP
          resources: {}
          volumeMounts:
            - name: vol-mdm
              mountPath: /mnt
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext:
            capabilities:
              add:
                - SYS_ADMIN
            privileged: true
            runAsUser: 0
            runAsGroup: 0
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext:
        seLinuxOptions: {}
      affinity: {}
      schedulerName: default-scheduler
      hostAliases:
        - ip: 10.244.10.88
          hostnames:
            - jeecg-boot-redis
      dnsConfig: {}
  serviceName: jeecg-boot-system-mdm
  podManagementPolicy: OrderedReady
  updateStrategy:
    type: RollingUpdate
  revisionHistoryLimit: 10

@ -0,0 +1,24 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-mdm
  namespace: mdm-9-22
  labels:
    name: pvc-mdm
    namespace: mdm-9-22
  annotations:
    kubesphere.io/creator: admin
    pv.kubernetes.io/bind-completed: 'yes'
    pv.kubernetes.io/bound-by-controller: 'yes'
    volume.beta.kubernetes.io/storage-provisioner: nfs-promethe-store
  finalizers:
    - kubernetes.io/pvc-protection
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 4Gi
  volumeName: pvc-149e8c4e-a94d-4d8b-9ec3-6b079bde5b07
  storageClassName: promethe-store
  volumeMode: Filesystem

@ -0,0 +1,67 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: web-mdm
  namespace: mdm-9-22
  labels:
    k8s.kuboard.cn/name: web-mdm
  annotations:
    kubesphere.io/creator: admin
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s.kuboard.cn/name: web-mdm
  template:
    metadata:
      creationTimestamp: null
      labels:
        k8s.kuboard.cn/name: web-mdm
      annotations:
        kubectl.kubernetes.io/restartedAt: '2023-09-22T17:40:32+08:00'
        kubesphere.io/creator: admin
    spec:
      volumes:
        - name: vol-mdm
          persistentVolumeClaim:
            claimName: pvc-mdm
      containers:
        - name: web-mdm
          image: '172.21.13.108:80/platform-v3/nginxhtml:1.2.0907-alpine'
          ports:
            - name: html
              containerPort: 80
              protocol: TCP
          resources: {}
          volumeMounts:
            - name: vol-mdm
              mountPath: /mnt
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext:
            capabilities:
              add:
                - SYS_ADMIN
            privileged: true
            runAsUser: 0
            runAsGroup: 0
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext:
        seLinuxOptions: {}
      imagePullSecrets:
        - name: login
      affinity: {}
      schedulerName: default-scheduler
      hostAliases:
        - ip: 10.244.10.66
          hostnames:
            - jeecg-boot-system
      dnsConfig: {}
  serviceName: web-mdm
  podManagementPolicy: OrderedReady
  updateStrategy:
    type: RollingUpdate
  revisionHistoryLimit: 10

@ -0,0 +1,45 @@
# Create the pv
Failed
# Create the pvc
Created through the UI; a pv is generated automatically
# Create redis
10.244.10.88
# Create the backend application
10.244.10.66
## Authentication failing
From the command line, open the auth config file, fill in the credentials and save (it is synced to the pvc).
# Create the frontend application
# service
kubectl exec web-mdm-0 -n mdm-9-22 -it /bin/sh
ifconfig eth0 hw ether 76:66:a2:d8:66:0e;
ln -s /mnt/jeecglic.properties ./config/jeecglic.properties;
java -Djava.security.egd=file:/dev/./urandom -jar jeecg-boot-module-system-3.0.0.jar -Xms256m -Xmx2g --spring.profiles.active=dockerdev;tail -f /dev/null
clusterIP: 10.96.48.151
nodeIP: 172.18.0.3\172.18.0.4\172.18.0.5
podIP: 10.244.1.9\10.244.2.9\10.244.3.9
containerPort: 8081 - the pod's own port, used for pod-to-pod traffic. It is the port declared in the deployment; because the test image (apache) listens on a fixed, unchangeable port 80, this 8081 definition has no real effect.
hostPort: 9088 - mapped to a port on the host the pod runs on; also declared in the deployment.
servicePort: 8088 (the port on the service)
targetPort: 80 (in theory this should match containerPort, but since the test image listens on the unchangeable port 80 the containerPort does not take effect; to keep the test working, targetPort points directly at the port that is actually listening)
NodePort: 30080 - mapped to a port on every node

56
数据库/mongodb.md Normal file

@ -0,0 +1,56 @@
# Offline install
https://www.mongodb.com/try/download/community
## Download
```shell
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1804-4.2.25.tgz
```
## Extract
```shell
tar -zxvf mongodb-linux-x86_64-ubuntu1804-4.2.25.tgz
```
## Put the binaries on the PATH
```shell
# replace <mongodb-install-directory> with the extracted location
# sudo cp <mongodb-install-directory>/bin/* /usr/local/bin/
sudo cp mongodb-linux-x86_64-ubuntu1804-4.2.25/bin/* /usr/local/bin/
```
## Start
```shell
mongod --dbpath /var/lib/mongo --logpath /var/log/mongodb/mongod.log --fork
# error
mongod: error while loading shared libraries: libssl.so.1.1: cannot open shared object file: No such file or directory
# search the whole filesystem for the library
find / -name 'libssl.so.1.1'
# copy it to where the loader looks for it
cp /snap/core18/2812/usr/lib/x86_64-linux-gnu/libssl.so.1.1 /usr/lib/libssl.so.1.1
tail -f /var/log/mongodb/mongod.log
waiting for connections on port 27017
```
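Once the log shows it is waiting for connections, a quick ping confirms the server answers (a sketch; it assumes the `mongo` shell from the same tarball is on the PATH):
```shell
# should print { "ok" : 1 }
mongo --eval 'db.runCommand({ ping: 1 })'
```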
```
# list databases
show dbs;
# create/switch to a database
use testdb;
# create a collection first (similar to a table)
db.createCollection("testtab")
# list collections
show tables
# insert data
db.testtab.insertOne({"name":"菜鸟教程"})
db.testtab.insert({title: 'MongoDB 教程',
description: 'MongoDB 是一个 Nosql 数据库',
by: '教程',
url: 'http://www.baidu.com',
tags: ['mongodb', 'database', 'NoSQL'],
likes: 100
})
# query documents
db.testtab.find()
# update
db.testtab.update({'title':'MongoDB 教程'},{$set:{'title':'MongoDB MongoDB'}})
```

27
数据库/mysql/MVCC.md Normal file

@ -0,0 +1,27 @@
# LBCC
Lock-Based Concurrency Control.
# MVCC
Multi-Version Concurrency Control.
Generates snapshots (read views).
Hidden columns plus the undo log.
# Handling phantom reads
When one transaction mixes snapshot reads and current reads while another transaction inserts rows, phantom reads can appear.
## Serializable isolation
## Gap locks
select * from user where age = 18 for update;
show engine innodb status\G; (requires innodb_status_output and innodb_status_output_lock to be enabled)
# Locks are taken on indexes
Record locks (row locks), gap locks, next-key locks

21
数据库/mysql/常用.md Normal file

@ -0,0 +1,21 @@
```sql
-- look up server variables
SHOW GLOBAL VARIABLES;
SHOW GLOBAL VARIABLES like 'offline_mode';
-- create a user that may connect from any IP
CREATE USER 'closeToU8'@'%' IDENTIFIED BY 'frpPurchase8*';
-- grant privileges
GRANT SELECT, update ON frp_xiongbang.f_api_address TO 'closeToU8'@'%';
FLUSH PRIVILEGES;
select * from f_api_address where remarks like '%采购%'
select * from f_api_address where id in ('11016','11006') and remarks like '%采购%'
update f_api_address set delete_flag = 1 where id in ('11016','11006');
select * from f_api_address where id in ('11016','11006');
update f_api_address set delete_flag = 0 where id in ('11016','11006');
select * from f_api_address where id in ('11016','11006');
```

@ -0,0 +1,5 @@
## Not enough connections
## Connection timeouts

@ -0,0 +1,11 @@
```sql
SELECT table_name, table_type, engine FROM information_schema.tables WHERE table_schema = 'frp_xiongbang' and engine !='InnoDB' ORDER BY table_name DESC;
ALTER TABLE `f_purchase_consult_line` ENGINE = InnoDB;
```

33
数据库/mysql/索引.md Normal file

@ -0,0 +1,33 @@
# InnoDB
Two storage files: the table definition file and a combined index+data file; keeping index and data together saves one I/O.
# MyISAM
Three files: table definition, index file and data file; one more I/O than InnoDB.
# Binary trees
1. Binary tree
2. BST (Binary Search Tree)
3. AVL tree
4. Red-black tree
# Multi-way trees
B-tree: data is stored on every node.
B+ tree: data is stored only in the leaf nodes.
At the same tree height, a B+ tree can index more data.
Typically a B+ tree of 3-4 levels can cover tens of millions of rows.
For frequently updated columns it is better not to add an index.
Data migration is slow; drop the indexes first and rebuild them after the migration finishes.
An index bound to the data is a clustered index; one that is not is a secondary (non-clustered) index.
The data is stored only once: each index is its own tree, and the leaf nodes of secondary indexes point to the clustered index.

@ -0,0 +1,43 @@
```
The information_schema database holds metadata about every database on the MySQL server: database names, their tables, the data types and access privileges of the columns, and so on. In short, which databases exist on this server, which tables each database contains, the field types of every table and which privileges each database requires are all stored in information_schema.
```
1. `innodb_trx` - all currently running transactions
2. `innodb_locks` - locks currently held
3. `innodb_lock_waits` - the mapping between lock waiters and holders
```sql
-- current number of connections
SHOW STATUS LIKE 'Threads_connected';
-- configured connection limit
SHOW VARIABLES LIKE 'max_connections';
-- which tables are locked
SHOW OPEN TABLES WHERE In_use > 0;
-- which transactions hold locks
select * from information_schema.innodb_trx;
-- kill the offending thread
kill trx_mysql_thread_id;
select trx_mysql_thread_id from information_schema.innodb_trx where trx_state ='LOCK WAIT ';
-- list all processes
SELECT * FROM information_schema.processlist
show full processlist;
```
```sql
-- SQL execution time limit
show global variables where Variable_name = 'max_execution_time'
SET GLOBAL MAX_EXECUTION_TIME=1000;
SET SESSION MAX_EXECUTION_TIME=1000;
SELECT /*+ MAX_EXECUTION_TIME(1000) */ SLEEP(10), a.* from test a;
```
select * from information_schema.`COLUMNS` where TABLE_SCHEMA='platform3_dev' and COLLATION_NAME='utf8mb4_unicode_ci' and TABLE_NAME like 'bfm_ems_equipment_maintenance%' and COLUMN_NAME LIKE '%id%' limit 1000;
Connection timeout
```shell
The last packet successfully received from the server was 235,944,824 milliseconds ago. The last packet sent successfully to the server was 235,944,824 milliseconds ago. is longer than the server configured value of 'wait_timeout'. You should consider either expiring and/or testing connection validity before use in your application, increasing the server configured values for client timeouts, or using the Connector/J connection property 'autoReconnect=true' to avoid this problem.
```

204
数据库/mysql安装.md Normal file

@ -0,0 +1,204 @@
# Download
## Download URL
[https://dev.mysql.com/downloads/mysql/](https://dev.mysql.com/downloads/mysql/)
## Version
Linux - Generic (the generic Linux build)
```shell
# check the glibc version
[root@hy-node6 ~]# ldd --version
ldd (GNU libc) 2.17
Copyright (C) 2012 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
Written by Roland McGrath and Ulrich Drepper.
# download
wget https://dev.mysql.com/get/Downloads/MySQL-8.1/mysql-8.1.0-linux-glibc2.17-x86_64.tar
```
![image-20230921091205221](img\image-20230921091205221.png)
# Install
## Install reference
[https://dev.mysql.com/doc/refman/8.1/en/binary-installation.html](https://dev.mysql.com/doc/refman/8.1/en/binary-installation.html)
```shell
# add the mysql group; check groups with cat /etc/group
groupadd mysql
# add user hy-mysql to the mysql group; check users with cat /etc/passwd
useradd -r -g mysql -s /bin/false hy-mysql
# unpack the tar
tar xvf mysql-8.1.0-linux-glibc2.17-x86_64.tar
# decompress the inner .xz and extract
xz -dc mysql-8.1.0-linux-glibc2.17-x86_64.tar.xz | tar x
chmod 777 mysqld
# add the bin directory to PATH
export PATH=$PATH:/software/mysql/mysql-8.1.0-linux-glibc2.17-x86_64/bin
# reload the profile
source /etc/profile
```
# Setup
## Reference
[https://dev.mysql.com/doc/refman/8.1/en/postinstallation.html](https://dev.mysql.com/doc/refman/8.1/en/postinstallation.html)
## Config file locations
```shell
# back up the config file
cp my.cnf my.cnf.backup
# the default config file; keeping the default location is recommended
vi /etc/my.cnf
# change the paths in the file
[client]
port=3306
socket=/software/mysql/socket/mysql.sock
[mysqld]
datadir=/software/mysql/datadir
basedir=/software/mysql/basedir
socket=/software/mysql/socket/mysql.sock
```
```shell
# create and authorize the directories referenced by the config file
[root@hy-node6 mysql-8.1.0-linux-glibc2.17-x86_64]# pwd
/software/mysql
[root@hy-node6 ]# mkdir -p datadir basedir socket/mysql.sock
[root@hy-node6 ]# chown hy-mysql:mysql datadir basedir socket/mysql.sock
[root@hy-node6 ]# chmod 750 datadir basedir socket/mysql.sock
[root@hy-node6 mysql-8.1.0-linux-glibc2.17-x86_64]# bin/mysqld --initialize --user=hy-mysql
2023-09-21T02:22:43.673521Z 0 [System] [MY-015017] [Server] MySQL Server Initialization - start.
2023-09-21T02:22:43.675219Z 0 [Warning] [MY-011070] [Server] 'Disabling symbolic links using --skip-symbolic-links (or equivalent) is the default. Consider not using this option as it' is deprecated and will be removed in a future release.
2023-09-21T02:22:43.675412Z 0 [System] [MY-013169] [Server] /software/mysql/mysql-8.1.0-linux-glibc2.17-x86_64/bin/mysqld (mysqld 8.1.0) initializing of server in progress as process 30526
2023-09-21T02:22:43.695144Z 1 [System] [MY-013576] [InnoDB] InnoDB initialization has started.
2023-09-21T02:22:44.540948Z 1 [System] [MY-013577] [InnoDB] InnoDB initialization has ended.
2023-09-21T02:22:52.149208Z 6 [Note] [MY-010454] [Server] A temporary password is generated for root@localhost: xa=V>dpyK7;y
2023-09-21T02:22:58.749405Z 0 [System] [MY-015018] [Server] MySQL Server Initialization - end.
```
## Start with systemctl
[https://dev.mysql.com/doc/mysql-secure-deployment-guide/8.0/en/secure-deployment-post-install.html#secure-deployment-startup-options](https://dev.mysql.com/doc/mysql-secure-deployment-guide/8.0/en/secure-deployment-post-install.html#secure-deployment-startup-options)
```javascript
cd /usr/lib/systemd/system
touch mysqld.service
chmod 644 mysqld.service
```
Write the unit file
```ini
[Unit]
Description=MySQL Server
Documentation=man:mysqld(8)
Documentation=http://dev.mysql.com/doc/refman/en/using-systemd.html
After=network.target
After=syslog.target
[Install]
WantedBy=multi-user.target
[Service]
User=hy-mysql
Group=mysql
# Have mysqld write its state to the systemd notify socket
Type=notify
# Disable service start and stop timeout logic of systemd for mysqld service.
TimeoutSec=0
# Start main service
ExecStart=/software/mysql/mysql-8.1.0-linux-glibc2.17-x86_64/bin/mysqld --defaults-file=/etc/my.cnf $MYSQLD_OPTS
# Use this to switch malloc implementation
EnvironmentFile=-/etc/sysconfig/mysql
# Sets open_files_limit
LimitNOFILE = 10000
Restart=on-failure
RestartPreventExitStatus=1
# Set environment variable MYSQLD_PARENT_PID. This is required for restart.
Environment=MYSQLD_PARENT_PID=1
PrivateTmp=false
```
Enable and start
```shell
systemctl enable mysqld.service
systemctl start mysqld
systemctl status mysqld
```
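If the service does not come up, the systemd journal and the server error log are the first places to look (a sketch; the error log lands in the datadir configured in my.cnf above):
```shell
# recent log lines from the unit
journalctl -u mysqld --no-pager -n 50
# the error log defaults to <hostname>.err inside the datadir
tail -n 50 /software/mysql/datadir/*.err
```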
Connect and configure mysql
```sql
mysql -u root -p
# change the root password
ALTER USER 'root'@'localhost' IDENTIFIED BY '123456'; -- Sxh910911!
# allow local TCP connections
CREATE USER 'root'@'127.0.0.1' IDENTIFIED BY '123456';
CREATE USER 'root'@'::1' IDENTIFIED BY '123456';
# create the remote user
CREATE USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY '123456';
# grant all privileges
GRANT ALL ON *.* TO 'root'@'%';
flush privileges;
```
# Problems encountered
## 1
Error: Can't connect to local MySQL server through socket '/tmp/mysql.sock' (2)
Option 1:
mysql -S /software/mysql/socket/mysql.sock -u root -p
Option 2:
/software/mysql/socket/mysql.sock is the socket configured under [mysqld] in /etc/my.cnf.
Create a symlink so -S /software/mysql/socket/mysql.sock is no longer needed:
ln -s /software/mysql/socket/mysql.sock /tmp/mysql.sock
Option 3:
Edit /etc/my.cnf and make sure [client] and [mysqld] use the same socket:
```ini
[client]
port=3306
socket=/software/mysql/socket/mysql.sock
```
# Multi-instance startup
[https://dev.mysql.com/doc/refman/8.1/en/using-systemd.html](https://dev.mysql.com/doc/refman/8.1/en/using-systemd.html)
# Installing on the replica
Copy the install package
```shell
scp mysql-8.1.0-linux-glibc2.17-x86_64.tar.xz root@192.168.1.120:/software/mysql
root@localhost: %mfSO2t==tkF
```

File diff suppressed because one or more lines are too long