在使用kubesphere v3.0
的过程中,我们发现,随着应用的不断构建,会产生大量的B2I
任务记录,并且minio内上传的程序包也会有所堆积。
B2I
任务记录
这些内容对于运维管理是很大的挑战,很多时候我们并不需要全部保存(如构建历史)。因此我们开发了ks-cleaner,
用于GC构建记录与minio中无用介质。
# CronJob that periodically runs the ks-cleaner image to GC B2I build
# records and expired MinIO artifacts (see the Go code below).
kind: CronJob
apiVersion: batch/v1beta1  # beta CronJob API (pre-K8s 1.21); batch/v1 on newer clusters
metadata:
  name: ks-cleaner-job
  labels:
    app: ks-cleaner-job
spec:
  schedule: "*/1 * * * *"  # every minute — NOTE(review): confirm this cadence is intended for production
  concurrencyPolicy: Forbid  # never let two cleaner runs overlap
  suspend: false
  jobTemplate:
    metadata:
      labels:
        app: ks-cleaner-job
    spec:
      parallelism: 1
      completions: 1
      activeDeadlineSeconds: 30  # kill any run that hangs past 30s
      backoffLimit: 5
      template:
        spec:
          volumes:
            # Node clock/timezone so log timestamps match the host.
            - name: host-time
              hostPath:
                path: /etc/localtime
                type: ''
            # Cleaner configuration sourced from the ks-cleaner-cm ConfigMap.
            - name: volume-m8778k
              configMap:
                name: ks-cleaner-cm
                defaultMode: 420
          containers:
            - name: container-v89pxg
              image: harbor.wl.io/paas/ks-cleaner
              resources:
                # requests == limits: Guaranteed QoS for this small job
                limits:
                  cpu: 100m
                  memory: 100Mi
                requests:
                  cpu: 100m
                  memory: 100Mi
              volumeMounts:
                - name: host-time
                  readOnly: true
                  mountPath: /etc/localtime
                # Mount only the single config file, not a directory.
                - name: volume-m8778k
                  readOnly: true
                  mountPath: /etc/cleaner-config.yaml
                  subPath: cleaner-config.yaml
              terminationMessagePath: /dev/termination-log
              terminationMessagePolicy: File
              imagePullPolicy: Always
          restartPolicy: Never
          terminationGracePeriodSeconds: 30
          dnsPolicy: ClusterFirst
          serviceAccountName: default
          serviceAccount: default
          securityContext: {}
          schedulerName: default-scheduler
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 1
ks-cleaner
本质是一个周期性任务,它会周期性清理B2I的构建记录及minio内的过期介质,为系统瘦身。
构建记录清理核心实现:
package gc
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"time"
)
// S2iItem is the subset of a KubeSphere s2ibuilder record the cleaner
// reads: identity plus creation time, which drives the expiry decision
// in CleanS2iJob. The anonymous Metadata field has no json tag; it
// matches the "metadata" key via Go's case-insensitive field matching.
type S2iItem struct {
	Metadata struct {
		Name string `json:"name"`
		Namespace string `json:"namespace"`
		CreationTimestamp time.Time `json:"creationTimestamp"`
	}
}
// S2iItemList is one page of the s2ibuilders list API response.
type S2iItemList struct {
	Items []S2iItem `json:"items"` // records on this page
	TotalItems int `json:"totalItems"` // total record count across all pages
}
// doRequest executes req against the KubeSphere API with basic auth and
// JSON headers and returns the raw response. The caller owns resp.Body
// and must close it.
func (conf Config) doRequest(req *http.Request) (*http.Response, error) {
	req.SetBasicAuth(conf.Kubesphere.Username, conf.Kubesphere.Password)
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Content-Type", "application/json")
	// A timeout is essential here: without one, a hung API server stalls
	// the whole run past the Job's activeDeadlineSeconds. The per-call
	// client is cheap since the default transport is shared.
	cli := &http.Client{Timeout: 10 * time.Second}
	return cli.Do(req)
}
// getS2iItemList pages through the KubeSphere s2ibuilders API (10 per
// page, sorted by create time) and returns every build record found in
// the given namespace. Any request/read failure ends the pagination and
// returns the records collected so far (best-effort).
func (conf Config) getS2iItemList(namespace string) (items []S2iItem) {
	page := 0
	for {
		page++
		s2iItemList := S2iItemList{}
		url := fmt.Sprintf("%s/kapis/resources.kubesphere.io/v1alpha3/namespaces/%s/s2ibuilders?limit=10&page=%d&sortBy=createTime",
			conf.Kubesphere.Endpoint, namespace, page)
		log.Printf("请求: %s\n", url)
		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			// A nil request would panic inside doRequest; stop here.
			log.Printf("构造请求失败 -> %s\n", err.Error())
			return items
		}
		resp, err := conf.doRequest(req)
		if err != nil {
			// Transport-level failure (not "page out of range").
			log.Printf("请求失败 -> %s\n", err.Error())
			return items
		}
		body, err := ioutil.ReadAll(resp.Body)
		// Close per page so connections are reused, not leaked.
		resp.Body.Close()
		if err != nil {
			log.Printf("读取response失败 -> %s\n", err.Error())
			return items
		}
		// Malformed JSON leaves the zero value, which terminates below.
		_ = json.Unmarshal(body, &s2iItemList)
		// Also check the per-page item count: if the API reports the
		// grand total on out-of-range pages, TotalItems alone would
		// never reach zero and the loop would spin forever.
		if s2iItemList.TotalItems == 0 || len(s2iItemList.Items) == 0 {
			return items
		}
		items = append(items, s2iItemList.Items...)
	}
}
// deleteS2iItem removes one s2ibuilder record (namespace/name) through
// the KubeSphere devops API. Errors are logged, not returned, matching
// the cleaner's best-effort style.
func (conf Config) deleteS2iItem(namespace, name string) {
	url := fmt.Sprintf("%s/apis/devops.kubesphere.io/v1alpha1/namespaces/%s/s2ibuilders/%s",
		conf.Kubesphere.Endpoint, namespace, name)
	req, err := http.NewRequest(http.MethodDelete, url, nil)
	if err != nil {
		log.Println(err)
		// Previously fell through and passed a nil request to
		// doRequest, which would panic in SetBasicAuth.
		return
	}
	log.Printf("删除%s下的s2i job: %s\n", namespace, name)
	resp, err := conf.doRequest(req)
	if err != nil {
		log.Println(err)
		return
	}
	// Close the response body so the connection can be reused.
	resp.Body.Close()
}
// CleanS2iJob deletes s2i build records older than the configured
// retention window (JobHistory.PreserveDays) in every configured
// namespace. Records still inside the window are logged and kept.
func CleanS2iJob(c Config) {
	log.Println("检测s2i需要清理的job记录...")
	// The cutoff is the same for the entire run; hoisted out of the
	// loops instead of recomputing time.Now() per record.
	expire := time.Now().AddDate(0, 0, -c.JobHistory.PreserveDays)
	for _, ns := range c.JobHistory.Namespaces {
		items := c.getS2iItemList(ns)
		log.Printf("命名空间:%s下,共计%d条构建记录\n", ns, len(items))
		for _, i := range items {
			if i.Metadata.CreationTimestamp.Before(expire) {
				c.deleteS2iItem(i.Metadata.Namespace, i.Metadata.Name)
				log.Printf("过期job %s/%s -> %s\n", i.Metadata.Namespace, i.Metadata.Name, i.Metadata.CreationTimestamp.String())
			} else {
				expireAt := i.Metadata.CreationTimestamp.AddDate(0, 0, c.JobHistory.PreserveDays).Local()
				log.Printf("job: %s/%s 过期时间为 -> %s,跳过清理任务...\n", i.Metadata.Namespace, i.Metadata.Name, expireAt)
			}
		}
	}
}
minio内过期介质清理核心实现:
package gc
import (
"context"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"log"
"time"
)
// s3client builds a MinIO client from the configured endpoint and
// static credentials. It exits the process (log.Fatalln) on a malformed
// endpoint, since the cleaner is useless without object storage.
func (conf Config) s3client() *minio.Client {
	endpoint := conf.S2IBinaries.Minio.Address
	accessKeyID := conf.S2IBinaries.Minio.Ak
	secretAccessKey := conf.S2IBinaries.Minio.Sk
	// NOTE(review): TLS is hard-disabled here — confirm MinIO is only
	// reachable over plain HTTP inside the cluster.
	minioClient, err := minio.New(endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
		Secure: false,
	})
	if err != nil {
		log.Fatalln(err)
	}
	// The previous %#v dump of the whole client struct (including the
	// credentials provider) on every run was log noise and a potential
	// secret leak; removed.
	return minioClient
}
// CleanS3 removes objects in the configured bucket whose LastModified
// timestamp is older than the retention window
// (S2IBinaries.PreserveDays). It is a no-op unless PeriodClean is set.
func CleanS3(config Config) {
	if !config.S2IBinaries.PeriodClean {
		return
	}
	log.Println("检测s3需要清理的制品...")
	client := config.s3client()
	bucket := config.S2IBinaries.Minio.Bucket
	exists, err := client.BucketExists(context.TODO(), bucket)
	// Check the error BEFORE the flag: on error `exists` is false, and
	// the old order reported "doesn't exist" and hid the real failure.
	if err != nil {
		log.Fatalln(err)
	}
	if !exists {
		log.Fatalf("Bucket: %s doesn't exist.", bucket)
	}
	// Objects last modified before this instant are eligible for
	// deletion; the cutoff is loop-invariant, so compute it once.
	// (The old len() on the ListObjects channel reported the channel's
	// buffered count — not the number of files — and was removed.)
	before := time.Now().AddDate(0, 0, -config.S2IBinaries.PreserveDays)
	// NOTE(review): without Recursive:true only the top level of the
	// bucket is listed — confirm uploads are stored flat.
	for object := range client.ListObjects(context.TODO(), bucket, minio.ListObjectsOptions{}) {
		// A listing failure is delivered in-band; stop on the first one.
		if object.Err != nil {
			log.Println(object.Err)
			return
		}
		if object.LastModified.Before(before) {
			log.Printf("try to delete %s...\n", object.Key)
			if err := client.RemoveObject(context.TODO(), bucket, object.Key, minio.RemoveObjectOptions{}); err != nil {
				log.Println(err)
			}
		} else {
			expireAt := object.LastModified.AddDate(0, 0, config.S2IBinaries.PreserveDays).Local()
			log.Printf("%s过期时间为 -> %s 跳过清理任务...\n", object.Key, expireAt)
		}
	}
}
定时任务展示
image.png
网友评论