k8s-05-Helm

Author: 西海岸虎皮猫大人 | Published 2020-10-21 17:19

1 Overview

Charts are configurable and their manifests are generated dynamically (by passing in variables).
Helm is to Kubernetes roughly what yum is to Linux: a package manager.

Environment setup

# Unpack
tar -zxvf helm-v3.3.0-linux-amd64.tar.gz
# Copy to /usr/local/bin
cp linux-amd64/helm /usr/local/bin
# Make it executable
chmod a+x /usr/local/bin/helm
# Helm 3 no longer requires a Tiller deployment
# Verify
helm version
# Helm chart hub
https://hub.helm.sh/
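
With the binary in place, the usual next step is to register a chart repository and search it; a minimal sketch (the repo URL is the Aliyun mirror also used later in this article):

# Add a repository, refresh the index, and search for charts
helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
helm repo update
helm search repo nginx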

2 Custom Helm templates

mkdir hello-world && cd hello-world
vi Chart.yaml
----------
apiVersion: v2 # required by Helm 3
name: hello-world
version: 1.0.0
----------
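
As an aside, Helm 3 can also scaffold a complete chart skeleton (Chart.yaml, values.yaml, templates/ and so on) instead of writing the files by hand:

helm create hello-world
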
# Create the template files
mkdir ./templates
vi ./templates/deployment.yaml
------------------
apiVersion: apps/v1 # extensions/v1beta1 was removed in Kubernetes 1.16
kind: Deployment
metadata:
  name: hello-world
spec:
  replicas: 1
  selector: # required by apps/v1
    matchLabels:
      app: hello-world
  template:
    metadata:
      labels:
        app: hello-world
    spec:
      containers:
      - name: hello-world
        image: harbor.dfun.cn/library/nginx:latest
        ports:
        - containerPort: 80
          protocol: TCP
------------------
vi templates/service.yaml
----------------
apiVersion: v1
kind: Service
metadata:
  name: hello-world
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: hello-world
----------------
# Install (-g generates a release name)
helm install . -g
# List the deployed releases
helm ls
# Show the status of a specific release
helm status chart-1597590494
# Remove all Kubernetes resources belonging to the release
helm delete chart-1597590494
# Helm 3 has no soft delete, so --purge is no longer needed
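
Before deleting, the revision history of a release can also be inspected; a short sketch reusing the generated release name from above (in Helm 3 the canonical command is helm uninstall, with helm delete kept as an alias):

# Show the revision history of a release
helm history chart-1597590494
# Remove the release (same effect as helm delete above)
helm uninstall chart-1597590494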

3 Passing configuration parameters via variables

# Define the variables
vi values.yaml
--------
image:
  repository: harbor.dfun.cn/library/nginx
  tag: 'latest'
--------
# Reference them in the template
vi ./templates/deployment.yaml
------------
...
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
...
------------
# Values from values.yaml can be overridden at deploy time
helm install --set image.tag='v2' . -g
# Upgrade
helm upgrade chart-1597591130 .
helm upgrade chart-1597591130 . --set image.tag='v3'
# Roll back to revision 1
helm rollback chart-1597591130 1
# Dry run: render and validate without actually creating anything
helm install . --dry-run -g
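
The substitution can also be checked without touching the cluster at all; a sketch using two stock Helm 3 commands (the release name reuses the one above):

# Render the manifests locally with an override
helm template . --set image.tag='v2'
# Show the user-supplied values of a deployed release
helm get values chart-1597591130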

4 Installing the Dashboard via Helm

# Add a China-local mirror of the stable repo
helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
# Update the repositories
helm repo update
# Fetch the chart
helm fetch stable/kubernetes-dashboard
# Unpack
tar -zxvf kubernetes-dashboard-0.6.0.tgz && cd kubernetes-dashboard
# Create the values file (an accidental test showed an empty file works too...)
vi kubernetes-dashboard.yaml
--------
image:  
  repository: k8s.gcr.io/kubernetes-dashboard-amd64  
  tag: v1.10.1
ingress:  
  enabled: true  
  hosts:    
  - k8s.frognew.com  
  annotations:    
    nginx.ingress.kubernetes.io/ssl-redirect: "true"    
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"  
  tls:   
  - secretName: frognew-com-tls-secret      
    hosts:      
    - k8s.frognew.com
rbac:  
  clusterAdminRole: true
--------
# Install (Helm 3 treats -n as --namespace, so the Helm 2-style "-n <release-name>" flag is dropped here and -g generates the name)
helm install stable/kubernetes-dashboard \
--namespace kube-system \
-f kubernetes-dashboard.yaml -g
# Check the service
kubectl get svc -n kube-system
# Change how the service is exposed
kubectl edit svc kubernetes-dashboard-1597593090 -n kube-system
# Change the type from ClusterIP to NodePort
# Check the exposed port
kubectl get svc -n kube-system
# Open the page in Firefox and bypass the certificate warning
# Look up the login token
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
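
The token lookup above assumes a dashboard-admin ServiceAccount already exists. If it does not, a minimal sketch to create one and bind it to cluster-admin (the name dashboard-admin is an assumption chosen to match the awk filter above):

kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin \
  --clusterrole=cluster-admin \
  --serviceaccount=kube-system:dashboard-admin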

5 Deploying Prometheus with Helm

Related files: kube-prometheus.git.tar.gz | prometheus.tar.gz | load-images.sh
Extraction code: s90z
kube-prometheus.git.tar.gz is the source archive
prometheus.tar.gz contains the required images
load-images.sh is a batch image-import script

# Clone the repository
# git clone https://github.com/coreos/kube-prometheus.git
# Newer master revisions no longer create the namespace automatically, and cloning is slow, so the source archive is used here
# Unpack kube-prometheus.git.tar.gz
cd kube-prometheus/manifests
# Expose a NodePort in each of the following files
vi grafana-service.yaml -> 30100
vi prometheus-service.yaml -> 30200
vi alertmanager-service.yaml -> 30300
----------------------
spec:
  type: NodePort # add the type
  ports:
  - name: http
    port: 3000
    targetPort: http
    nodePort: 30100 # add the exposed port
---------------------
# Import the images on every node, or use Harbor instead
# Upload kube-prometheus.git.tar.gz to the root home directory and unpack it
# Upload load-images.sh and run it
sh load-images.sh
# Install; if apply reports errors, run it once more (see the note below)
kubectl apply -f ./manifests/
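
The re-run is usually needed because the CustomResourceDefinitions must be registered before the resources that use them can be created. If the unpacked tree contains a manifests/setup directory (whether it does depends on the kube-prometheus revision in the archive, so treat the path as an assumption), applying it first avoids the error:

kubectl apply -f ./manifests/setup/
kubectl apply -f ./manifests/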

# Pods stuck in Pending with: 0/3 nodes are available: 3 node(s) didn't match node selector.
# The worker nodes are missing a label; add it:
kubectl label nodes k8s-02 kubernetes.io/os=linux
kubectl label nodes k8s-03 kubernetes.io/os=linux

# Check node/pod resource usage
kubectl top node
kubectl top pod
# Check the services
kubectl get svc --all-namespaces

# Open Prometheus at masterip:30200
Status -> Targets shows the monitored endpoints
# CPU usage per pod
sum by (pod_name)( rate(container_cpu_usage_seconds_total{image!="", pod_name!=""}[1m] ) )
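
A companion query for memory usage, kept consistent with the label names above (this snapshot of kube-prometheus still uses pod_name; newer releases rename the label to pod):

sum by (pod_name)( container_memory_working_set_bytes{image!="", pod_name!=""} )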

# Open Grafana at masterip:30100
k8s-01:30100
# Default credentials: admin/admin
Configuration -> Data Sources -> open Prometheus -> click Test
Settings tab -> import the 3 bundled dashboards
Dashboards -> Manage -> open Kubernetes / Nodes
The per-node monitoring metrics are now visible

6 HPA test

# hpa-example.tar is Google's official demo image for simulating high resource consumption
# Import the image on every node
docker load -i hpa-example.tar
# Run it
kubectl run php-apache --image=gcr.io/google_containers/hpa-example --requests=cpu=200m --expose --port=80 --image-pull-policy=Never
# Create the HPA controller (a declarative equivalent is sketched after this block)
kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
# Check the pod's resource usage
kubectl top pod php-apache-7b598f6dd-x6drw
# List the HPA resources
kubectl get hpa
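
The same autoscaler can be written declaratively; a sketch equivalent to the kubectl autoscale command above (the file name php-apache-hpa.yaml is hypothetical):

vi php-apache-hpa.yaml
--------
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: php-apache
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: php-apache
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 50
--------
kubectl apply -f php-apache-hpa.yaml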

# Start a pod that requests php-apache in a loop
kubectl run -i --tty load-generator --image=busybox /bin/sh
while true; do wget -q -O- http://php-apache.default.svc.cluster.local; done
# In a new terminal, watch the new pods being created
kubectl get pod -w
# After exiting busybox, the pods scale back down rather slowly
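
The slow scale-down is expected: by default the HPA controller waits through a stabilization window (on the order of five minutes of low load) before shrinking. The replica count and CPU target can be watched live:

# Watch the HPA's current/target CPU and replica count update
kubectl get hpa php-apache -w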

7 Namespace resource limits

I. Compute resource quota
apiVersion: v1
kind: ResourceQuota
metadata:  
  name: compute-resources  
  namespace: spark-cluster
spec:  
  hard:    
    pods: "20"    
    requests.cpu: "20"    
    requests.memory: 100Gi    
    limits.cpu: "40"    
    limits.memory: 200Gi
II. Object count quota
apiVersion: v1
kind: ResourceQuota
metadata:  
  name: object-counts  
  namespace: spark-cluster
spec:  
  hard:    
    configmaps: "10"    
    persistentvolumeclaims: "4"    
    replicationcontrollers: "20"    
    secrets: "10"    
    services: "10"    
    services.loadbalancers: "2"
III. CPU and memory LimitRange

That is, the default maximum limits applied to each pod.

apiVersion: v1
kind: LimitRange
metadata:
  name: mem-limit-range
spec:
  limits:
  - default:
      memory: 50Gi
      cpu: 5
    defaultRequest:
      memory: 1Gi
      cpu: 1
    type: Container
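
To apply and verify the three manifests above, assuming they are saved as compute-resources.yaml, object-counts.yaml and mem-limit-range.yaml (hypothetical file names; the LimitRange carries no namespace in its metadata, so one is passed on the command line):

kubectl create namespace spark-cluster
kubectl apply -f compute-resources.yaml
kubectl apply -f object-counts.yaml
kubectl apply -f mem-limit-range.yaml -n spark-cluster
# Inspect the quotas and limits
kubectl describe quota -n spark-cluster
kubectl describe limitrange mem-limit-range -n spark-cluster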

8 EFK log collection

The common stack is ELK; here the lighter-weight Fluentd (implemented in Ruby and C) replaces Logstash.

# Newer Helm versions fail during this install, so version 2.13.1 is used here
tar -zxvf helm-v2.13.1-linux-amd64.tar.gz
cd linux-amd64
cp helm /usr/local/bin/
# Set up Tiller
vi rbac-config.yaml
----------------
apiVersion: v1
kind: ServiceAccount
metadata:  
  name: tiller  
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:  
  name: tiller
roleRef:  
  apiGroup: rbac.authorization.k8s.io  
  kind: ClusterRole  
  name: cluster-admin
subjects:  
- kind: ServiceAccount    
  name: tiller    
  namespace: kube-system
----------------
kubectl create -f rbac-config.yaml
helm init --service-account tiller --skip-refresh
# If pulling the image fails, load the Tiller image manually
# docker load -i helm-tiller.tar
# Check the Tiller status
kubectl get pod -n kube-system -l app=helm

# Add mirror repositories for faster downloads
helm repo add incubator http://mirror.azure.cn/kubernetes/charts-incubator/
helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts

# Add the upstream repository (this re-points incubator away from the mirror above)
helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator
####################
# Deploy Elasticsearch
mkdir efk && cd efk
# Create the namespace
kubectl create namespace efk
helm fetch incubator/elasticsearch
# Unpack the fetched chart (the version in the file name depends on the repo)
tar -zxvf elasticsearch-*.tgz
cd elasticsearch
# Edit the configuration
vi values.yaml
-----------
# Given the limited machine resources, every replicas value is set to 1 and persistence is disabled for master and data
cluster:
  name: "elasticsearch"
...
    MINIMUM_MASTER_NODES: "1" # changed

client:
  name: client
  replicas: 1 # changed
...
master:
  name: master
  exposeHttp: false
  replicas: 1 # changed
  heapSize: "512m"
  persistence:
    enabled: false # changed
...
data:
  name: data
  exposeHttp: false
  replicas: 1 # changed
  heapSize: "1536m"
  persistence:
    enabled: false # changed
-----------

# Load the image manually
docker load -i elasticsearch-oss.tar
# Install
helm install --name els1 --namespace=efk -f values.yaml incubator/elasticsearch
# Kubernetes container logs live here
cd /var/log/containers/

# Test Elasticsearch
# Look up the service IP and port
kubectl get svc -n efk
kubectl run cirror-$RANDOM --rm -it --image=cirros -- /bin/sh
# Run inside the container
curl 10.105.28.39:9200/_cat/nodes
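
A quick follow-up check from the same test container: the cluster health endpoint should report yellow or green (yellow is normal here, since a single data node cannot place replica shards):

curl '10.105.28.39:9200/_cluster/health?pretty'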

################
# Deploy Fluentd
# Add the mirror repository
helm repo add incubator http://mirror.azure.cn/kubernetes/charts-incubator/
# Fetch the chart (stable/fluentd-elasticsearch fails to download)
helm fetch incubator/fluentd-elasticsearch
tar -zxvf fluentd-elasticsearch-2.0.7.tgz
cd fluentd-elasticsearch
# Point the configuration at the Elasticsearch IP
vi values.yaml
------------
elasticsearch:
  host: '10.105.28.39'
--------------
# Load the image manually on each node
docker load -i fluentd-elasticsearch.tar
# Install (if stable cannot be reached, installing the local unpacked chart with "." works as well)
helm install --name flu1 --namespace=efk -f values.yaml stable/fluentd-elasticsearch
######################
# Deploy Kibana
helm fetch stable/kibana --version 0.14.8
tar -zxvf kibana-0.14.8.tgz && cd kibana
# Point kibana.yml at the Elasticsearch address
vi values.yaml
-----------
files:
  kibana.yml:
...
    elasticsearch.url: http://10.105.28.39:9200 # changed (Kibana expects a full URL including the scheme)
-----------
# Install
helm install --name kib1 --namespace=efk -f values.yaml stable/kibana --version 0.14.8
# For reference, the docker command used to export the image
docker save -o kibana.tar docker.elastic.co/kibana/kibana-oss
kubectl get svc -n efk
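
As with the Dashboard earlier, the Kibana service can be switched from ClusterIP to NodePort to reach the UI from outside the cluster (the service name kib1-kibana is an assumption derived from the release name):

kubectl edit svc kib1-kibana -n efk
# Change type: ClusterIP -> NodePort, then look up the assigned port
kubectl get svc -n efk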
