Install Helm
wget https://get.helm.sh/helm-v3.8.1-linux-amd64.tar.gz
tar -zxvf helm-v3.8.1-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin && chmod +x /usr/local/bin/helm
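A quick sanity check that the binary landed on PATH:
helm version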
https://helm.sh/zh/docs/intro/install/
Add a repository
helm repo add my-repo https://charts.bitnami.com/bitnami
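After adding a repo, refresh the local chart index and verify it resolves:
helm repo update
helm search repo my-repo/nginx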
Installing services with Helm
helm ingress
helm upgrade --install ingress-nginx ingress-nginx \
--repo https://kubernetes.github.io/ingress-nginx \
--set controller.image.registry=registry.aliyuncs.com/google_containers \
--namespace ingress-nginx --create-namespace
# Alternatively, pin the images by digest and run the controller as a
# host-network DaemonSet on labeled nodes via a values file:
cat <<EOF > ingress-nginx-values.yaml
controller:
  image:
    registry: registry.cn-shanghai.aliyuncs.com/viper
    image: ingress-nginx-controller
    digest: sha256:bc30cb296e7548162afd9601f6b96261dcca8263e05b962694d1686b4d5a9584
  watchIngressWithoutClass: true
  hostNetwork: true
  hostPort:
    enabled: true
    ports:
      http: 80
      https: 443
  kind: DaemonSet
  nodeSelector:
    ingress: "true"
  admissionWebhooks:
    patch:
      image:
        registry: registry.cn-shanghai.aliyuncs.com/viper
        image: kube-webhook-certgen
        digest: sha256:78351fc9d9b5f835e0809921c029208faeb7fbb6dc2d3b0d1db0a6584195cfed
EOF
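The values file is then presumably applied with the same upgrade-style install:
helm upgrade --install ingress-nginx ingress-nginx \
--repo https://kubernetes.github.io/ingress-nginx \
-f ingress-nginx-values.yaml \
--namespace ingress-nginx --create-namespace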
helm Nginx
# install with 3 replicas, then scale down to 2 in place
helm install nginx bitnami/nginx --set replicaCount=3
helm upgrade nginx bitnami/nginx --set replicaCount=2
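Helm keeps a revision history per release, so the scale-down can be inspected and undone:
helm history nginx
helm rollback nginx 1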
helm Redis
# standalone
helm install redis --set global.storageClass=nfs-csi \
--set architecture=standalone \
--set auth.password=PASSWORD \
--set replica.persistence.storageClass=nfs-csi \
bitnami/redis
# svc domain: redis-master.default.svc.cluster.local
# replication
helm install redis --set global.storageClass=nfs-csi \
--set architecture=replication \
--set auth.password=PASSWORD \
--set replica.persistence.storageClass=nfs-csi \
--set replica.replicaCount=2 \
bitnami/redis
# autoscaling: not tested
# --set auth.enabled=false \
# redis-master.default.svc.cluster.local for read/write operations (port 6379)
# redis-replicas.default.svc.cluster.local for read-only operations (port 6379)
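A quick connectivity check, following the same client-pod pattern used for Kafka below (image and service name assumed from the chart defaults):
kubectl run redis-client --restart='Never' --image docker.io/bitnami/redis --command -- sleep infinity
kubectl exec -it redis-client -- redis-cli -h redis-master.default.svc.cluster.local -a PASSWORD ping
# expected reply: PONG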
https://artifacthub.io/packages/helm/bitnami/redis
helm MySQL
# standalone
helm install mysql \
--set global.storageClass=nfs-csi \
--set auth.rootPassword=password \
--set auth.database=movie \
--set auth.username=movie \
--set auth.password=password \
bitnami/mysql
# replication
helm install mysql \
--set global.storageClass=nfs-csi \
--set auth.rootPassword=PASSWORD \
--set auth.database=movie \
--set auth.username=movie \
--set auth.password=PASSWORD \
--set architecture=replication \
--set secondary.replicaCount=2 \
bitnami/mysql
# permissions: if MySQL cannot write to the provisioned volume,
# enable the chown init container with volumePermissions.enabled=true
echo Primary: mysql-primary.default.svc.cluster.local:3306
echo Secondary: mysql-secondary.default.svc.cluster.local:3306
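A quick login check with a throwaway client pod (credentials as set above; the service is mysql.default.svc.cluster.local for the standalone install, mysql-primary for replication):
kubectl run mysql-client --rm -it --restart='Never' --image docker.io/bitnami/mysql \
--command -- mysql -h mysql.default.svc.cluster.local -u movie -ppassword movie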
https://artifacthub.io/packages/helm/bitnami/mysql
helm kafka
helm install kafka bitnami/kafka
$ helm install kafka \
--set persistence.storageClass="nfs-kafka" \
--set zookeeper.persistence.storageClass="nfs-zookeeper" \
bitnami/kafka
# service: kafka.default.svc.cluster.local
# a PVC stuck in Terminating: clear its finalizers, then force-delete the pod
kubectl patch pvc data-elasticsearch-data-0 -p '{"metadata":{"finalizers":null}}'
kubectl delete pod [pod name] --force --grace-period=0 -n [namespace]
# ============================================================================================= #
helm install kafka --set service.type=LoadBalancer \
--set replicaCount=3 \
--set zookeeper.replicaCount=2 \
bitnami/kafka
# ---------------------------------------------------------------------------------------------
NAME: kafka
LAST DEPLOYED: Tue Mar 8 15:55:06 2022
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
CHART NAME: kafka
CHART VERSION: 15.0.4
APP VERSION: 3.1.0
---------------------------------------------------------------------------------------------
WARNING
By specifying "serviceType=LoadBalancer" and not configuring the authentication
you have most likely exposed the Kafka service externally without any
authentication mechanism.
For security reasons, we strongly suggest that you switch to "ClusterIP" or
"NodePort". As alternative, you can also configure the Kafka authentication.
---------------------------------------------------------------------------------------------
** Please be patient while the chart is being deployed **
Kafka can be accessed by consumers via port 9092 on the following DNS name from within your cluster:
kafka.default.svc.cluster.local
Each Kafka broker can be accessed by producers via port 9092 on the following DNS name(s) from within your cluster:
kafka-0.kafka-headless.default.svc.cluster.local:9092
kafka-1.kafka-headless.default.svc.cluster.local:9092
kafka-2.kafka-headless.default.svc.cluster.local:9092
To create a pod that you can use as a Kafka client run the following commands:
kubectl run kafka-client --restart='Never' --image docker.io/bitnami/kafka:3.1.0-debian-10-r8 --namespace default --command -- sleep infinity
kubectl exec --tty -i kafka-client --namespace default -- bash
PRODUCER:
kafka-console-producer.sh \
--broker-list kafka-0.kafka-headless.default.svc.cluster.local:9092,kafka-1.kafka-headless.default.svc.cluster.local:9092,kafka-2.kafka-headless.default.svc.cluster.local:9092 \
--topic test
CONSUMER:
kafka-console-consumer.sh \
--bootstrap-server kafka.default.svc.cluster.local:9092 \
--topic test \
--from-beginning
# ---------------------------------------------------------------------------------------------
# test with Python clients
kubectl run python --restart='Never' --image python --namespace default --command -- sleep infinity
$ kafka-topics.sh --bootstrap-server kafka.default.svc.cluster.local:9092 --create --topic order --replication-factor 3 --partitions 3
$ kafka-topics.sh --bootstrap-server kafka.default.svc.cluster.local:9092 --describe --topic order
# list consumer groups
$ kafka-consumer-groups.sh --bootstrap-server kafka.default.svc.cluster.local:9092 --list
# message backlog (LAG is the count of messages not yet consumed)
$ kafka-consumer-groups.sh --bootstrap-server kafka.default.svc.cluster.local:9092 --describe --group consumer_order
GROUP           TOPIC  PARTITION  CURRENT-OFFSET  LOG-END-OFFSET  LAG     CONSUMER-ID                                              HOST          CLIENT-ID
consumer_order  order  0          273588          849962          576374  kafka-python-2.0.2-58bc1472-0c9e-4132-928f-6fcde512de74  /10.244.1.13  kafka-python-2.0.2
consumer_order  order  2          305624          852986          547362  kafka-python-2.0.2-b9e0494e-3144-437d-918a-f35cd5747f95  /10.244.1.13  kafka-python-2.0.2
consumer_order  order  1          277358          852664          575306  kafka-python-2.0.2-aa3074d8-27fc-41f6-8a5e-ed07aaf5729d  /10.244.1.13  kafka-python-2.0.2
# adjust partition count (it only seems to take effect once LAG = 0; partitions can only be increased, never decreased)
$ kafka-topics.sh --bootstrap-server kafka.default.svc.cluster.local:9092 --alter --topic order --partitions 8
# producer/consumer test pods
kubectl run kafka-producer-0 --restart='Never' --image python --namespace default --command -- sleep infinity
kubectl run kafka-consumer-0 --restart='Never' --image python --namespace default --command -- sleep infinity
kubectl run kafka-consumer-1 --restart='Never' --image python --namespace default --command -- sleep infinity
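A minimal end-to-end smoke test, run from inside the kafka-client pod created above with the console tools shipped in the image:
echo "hello" | kafka-console-producer.sh --broker-list kafka.default.svc.cluster.local:9092 --topic test
kafka-console-consumer.sh --bootstrap-server kafka.default.svc.cluster.local:9092 --topic test --from-beginning --max-messages 1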
kafka auth
# kafka-generate-ssl.sh
# https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh
kubectl create secret generic kafka-jks-0 --from-file=kafka.truststore.jks=./truststore/kafka.truststore.jks --from-file=kafka.keystore.jks=./keystore/kafka.keystore.jks
kubectl create secret generic kafka-jks-1 --from-file=kafka.truststore.jks=./truststore/kafka.truststore.jks --from-file=kafka.keystore.jks=./keystore/kafka.keystore.jks
# this attempt failed in testing
$ helm install kafka2 \
--set service.type=LoadBalancer \
--set replicaCount=1 \
--set auth.clientProtocol=tls \
--set auth.tls.existingSecrets[0]=kafka-jks-0 \
--set auth.tls.password=jksPassword \
bitnami/kafka
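One suspicion: the chart takes one JKS secret per broker (both were created above, but only index 0 was passed). An untested sketch for two brokers, assuming auth.tls.password matches the keystore password used when generating the JKS files:
helm install kafka2 \
--set replicaCount=2 \
--set auth.clientProtocol=tls \
--set auth.tls.existingSecrets[0]=kafka-jks-0 \
--set auth.tls.existingSecrets[1]=kafka-jks-1 \
--set auth.tls.password=jksPassword \
bitnami/kafka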
helm ElasticSearch
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install elasticsearch bitnami/elasticsearch
$ helm install elasticsearch \
--set name="logging" \
--set global.coordinating.name="coordinating-only" \
--set security.enabled=true \
--set security.tls.autoGenerated=true \
--set security.elasticPassword="PASSWORD" \
bitnami/elasticsearch --version 17.9.2
# after enabling security, Kibana also needs its own certificate files configured
# --set global.kibanaEnabled=true \
# --set kibana.elasticsearch.host=[https://elasticsearch-coordinating-only:9200/]
# run on every node
sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536
$ kubectl port-forward --namespace default svc/elasticsearch-coordinating-only 9200:9200 &
$ curl http://127.0.0.1:9200/
$ curl --user kibana_system:PASSWORD -k 'https://localhost:9200/'
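With security enabled, the elastic superuser (password set via security.elasticPassword above) can query cluster health over the self-signed TLS endpoint:
curl -k --user elastic:PASSWORD 'https://localhost:9200/_cluster/health?pretty'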
helm Harbor
# import the TLS certificate; see https://blog.csdn.net/zhiboqingyun/article/details/123117435
$ kubectl create secret tls cr.labdoc.cc --key 7535015_cr.labdoc.cc_nginx/7535015_cr.labdoc.cc.key --cert 7535015_cr.labdoc.cc_nginx/7535015_cr.labdoc.cc.pem
$ kubectl get secret cr.labdoc.cc
helm install harbor --set global.storageClass=nfs-csi \
--set adminPassword=PASSWORD \
--set externalURL=https://cr.labdoc.cc \
--set core.secretName=cr.labdoc.cc \
--set ingress.core.hostname=cr.labdoc.cc \
--set nginx.tls.existingSecret=cr.labdoc.cc \
bitnami/harbor
# ingress.core.hostname sets the hostname Harbor's ingress serves
** Please be patient while the chart is being deployed **
1. Get the Harbor URL:
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace default -w harbor'
export SERVICE_IP=$(kubectl get svc --namespace default harbor --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}")
echo "Harbor URL: http://$SERVICE_IP/"
2. Login with the following credentials to see your Harbor application
echo Username: "admin"
echo Password: $(kubectl get secret --namespace default harbor-core-envvars -o jsonpath="{.data.HARBOR_ADMIN_PASSWORD}" | base64 --decode)
Password: PASSWORD
# proxy cache; see https://www.modb.pro/db/117823
# add a registry endpoint (target) pointing at Docker Hub
# add a proxy-cache project (here named "docker") backed by that endpoint
# bitnami/redis
docker pull cr.labdoc.cc/docker/bitnami/redis
# "library" stands for official images (_/nginx), i.e. plain docker pull nginx
docker pull cr.labdoc.cc/docker/library/nginx
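Logging in and pushing to a regular (non-proxy) project works as usual; myproject here is a hypothetical project created in the UI:
docker login cr.labdoc.cc -u admin -p PASSWORD
docker tag nginx:latest cr.labdoc.cc/myproject/nginx:latest
docker push cr.labdoc.cc/myproject/nginx:latest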
helm Jenkins
helm install jenkins --namespace devops \
--set image.tag="latest" \
--set global.storageClass=nfs-csi \
--set persistence.storageClass=nfs-csi \
--set jenkinsUser=admin \
--set jenkinsPassword=PASSWORD \
bitnami/jenkins
# expected output (note: this release went into the devops namespace, so substitute --namespace devops in the commands below)
1. Get the Jenkins URL by running:
** Please ensure an external IP is associated to the jenkins service before proceeding **
** Watch the status using: kubectl get svc --namespace default -w jenkins **
export SERVICE_IP=$(kubectl get svc --namespace default jenkins --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}")
echo "Jenkins URL: http://$SERVICE_IP/"
2. Login with the following credentials
echo Username: admin
echo Password: $(kubectl get secret --namespace default jenkins -o jsonpath="{.data.jenkins-password}" | base64 --decode)
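If no LoadBalancer is available, a port-forward is enough for a first login (service name and port assumed from the chart defaults):
kubectl port-forward --namespace devops svc/jenkins 8080:80
# then browse to http://127.0.0.1:8080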
helm gitlab
$ helm repo add gitlab https://charts.gitlab.io/
$ helm install gitlab gitlab/gitlab \
--set global.hosts.domain=git.labdoc.cc \
--set certmanager-issuer.email=admin@labdoc.cc
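The chart stores the initial root password in a release-scoped secret (name assumed from the release name gitlab):
kubectl get secret gitlab-gitlab-initial-root-password -o jsonpath='{.data.password}' | base64 --decode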
https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/doc/quickstart/index.md