Installation prerequisites
Adjust the YUM repositories
sed -e 's!^#baseurl=!baseurl=!g' \
-e 's!^mirrorlist=!#mirrorlist=!g' \
-e 's!mirror.centos.org!mirrors.ustc.edu.cn!g' \
-i /etc/yum.repos.d/CentOS-Base.repo
yum install -y epel-release
sed -e 's!^mirrorlist=!#mirrorlist=!g' \
-e 's!^#baseurl=!baseurl=!g' \
-e 's!^metalink!#metalink!g' \
-e 's!//download\.fedoraproject\.org/pub!//mirrors.ustc.edu.cn!g' \
-e 's!http://mirrors\.ustc!https://mirrors.ustc!g' \
-i /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel-testing.repo
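After switching both repositories to the mirror, rebuilding the yum cache is a quick way to confirm they are reachable (standard yum subcommands):
yum clean all && yum makecache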
Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
Disable NetworkManager
systemctl stop NetworkManager && systemctl disable NetworkManager
Disable SELinux
setenforce 0
sed -i "s#=enforcing#=disabled#g" /etc/selinux/config
Disable swap
swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
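A quick check that swap is really off (both should report no active swap):
free -m | grep -i swap
cat /proc/swaps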
Synchronize time
yum install -y ntpdate ntp
ntpdate 0.cn.pool.ntp.org
hwclock --systohc
cat << EOF >> /etc/ntp.conf
driftfile /var/lib/ntp/drift
server 0.cn.pool.ntp.org
server 1.cn.pool.ntp.org
server 2.cn.pool.ntp.org
server 3.cn.pool.ntp.org
EOF
systemctl enable --now ntpd
ntpq -p
Adjust system parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
# https://github.com/moby/moby/issues/31208
# ipvsadm -l --timeout
# Fix long-lived connection timeouts under IPVS mode; any value below 900 works
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
# Make iptables see bridged traffic (required by kube-proxy and most CNI plugins)
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF
sysctl --system
If you encounter
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
it is because the br_netfilter module has not been loaded.
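Load it and re-apply the settings:
modprobe br_netfilter
sysctl --system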
Set up hostname resolution for the nodes
cat << EOF >> /etc/hosts
10.213.120.72 node1
10.213.120.71 node2
10.213.120.70 node3
10.213.120.69 node4
EOF
Enable IPVS
yum install ipvsadm ipset sysstat conntrack libseccomp -y
Load the IPVS kernel modules automatically at boot
:> /etc/modules-load.d/ipvs.conf
module=(
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
br_netfilter
)
for kernel_module in ${module[@]};do
/sbin/modinfo -F filename $kernel_module |& grep -qv ERROR && echo $kernel_module >> /etc/modules-load.d/ipvs.conf || :
done
systemctl enable --now systemd-modules-load.service
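To verify that the modules were actually loaded, list them with lsmod:
lsmod | grep -e ip_vs -e nf_conntrack -e br_netfilter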
Install docker-ce
curl -o /etc/yum.repos.d/docker-ce.repo https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo
sed -i 's#download.docker.com#mirrors.ustc.edu.cn/docker-ce#g' /etc/yum.repos.d/docker-ce.repo
yum -y install docker-ce bash-completion
cp /usr/share/bash-completion/completions/docker /etc/bash_completion.d/
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
},
"live-restore": true,
"max-concurrent-downloads": 10,
"max-concurrent-uploads": 10,
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
],
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": [
"https://docker.mirrors.ustc.edu.cn/"
]
}
EOF
systemctl enable --now docker
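To confirm Docker picked up the settings in daemon.json (the cgroup driver should be systemd and the storage driver overlay2):
docker info | grep -iE 'cgroup driver|storage driver'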
You can use the environment check script provided by Docker upstream to verify that the kernel and modules are suitable for running Docker:
curl https://raw.githubusercontent.com/docker/docker/master/contrib/check-config.sh > check-config.sh
bash ./check-config.sh
Upgrade the kernel
Optional. CentOS 7 ships only the minimum kernel version Docker requires; upgrading to the latest is recommended, otherwise the server may hang after running for a while.
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available --showduplicates | grep -Po '^kernel-ml\.x86_64\s+\K\S+(?=\.el7)'
yum --disablerepo="*" --enablerepo=elrepo-kernel install -y kernel-ml{,-devel}
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
grubby --default-kernel
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
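Reboot into the new kernel and confirm the running version:
reboot
uname -r   # run after the machine comes back up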
Passwordless login to the other nodes
Run the following on k8s-m1:
yum install sshpass -y
ssh-keygen -t rsa -P '' -f /root/.ssh/id_rsa
for NODE in k8s-m1 k8s-m2 k8s-m3 k8s-n1 k8s-n2; do
echo "--- $NODE ---"
sshpass -p 123456 ssh-copy-id -o "StrictHostKeyChecking no" -i /root/.ssh/id_rsa.pub ${NODE}
ssh ${NODE} "hostnamectl set-hostname ${NODE}"
done
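A quick loop to confirm that passwordless login works and the hostnames took effect:
for NODE in k8s-m1 k8s-m2 k8s-m3 k8s-n1 k8s-n2; do ssh ${NODE} hostname; done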
Here 123456 is the server's password.
If you do not plan to disable the firewall, open the relevant ports according to the port requirements below.
Service | Protocol | Action | Start Port | End Port | Notes
---|---|---|---|---|---
ssh | TCP | allow | 22 | |
etcd | TCP | allow | 2379 | 2380 |
apiserver | TCP | allow | 6443 | |
calico | TCP | allow | 9099 | 9100 |
bgp | TCP | allow | 179 | |
nodeport | TCP | allow | 30000 | 32767 |
master | TCP | allow | 10250 | 10258 |
dns | TCP | allow | 53 | |
dns | UDP | allow | 53 | |
local-registry | TCP | allow | 5000 | | For offline environments
local-apt | TCP | allow | 5080 | | For offline environments
rpcbind | TCP | allow | 111 | | Required if NFS is used
ipip | IPENCAP / IPIP | allow | | | Calico needs the IPIP protocol allowed
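If you keep firewalld running instead, the following sketch opens the ports above with firewall-cmd (firewalld's CLI on CentOS 7; trim the list to the components you actually use). Note that Calico's IPIP traffic is IP protocol 4 (ipencap), not a TCP/UDP port:
firewall-cmd --permanent --add-port=22/tcp --add-port=6443/tcp --add-port=179/tcp
firewall-cmd --permanent --add-port=2379-2380/tcp --add-port=9099-9100/tcp
firewall-cmd --permanent --add-port=30000-32767/tcp --add-port=10250-10258/tcp
firewall-cmd --permanent --add-port=53/tcp --add-port=53/udp
firewall-cmd --permanent --add-rich-rule='rule protocol value="ipencap" accept'
firewall-cmd --reload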
Step 1: Prepare Linux machines
See the hardware and operating system requirements below.
Hardware recommendations
System | Minimum Requirements |
---|---|
Ubuntu 16.04, 18.04 | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
Debian Buster, Stretch | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
CentOS 7.x | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
Red Hat Enterprise Linux 7 | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
SUSE Linux Enterprise Server 15/openSUSE Leap 15.2 | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
Note
The system requirements above and the instructions below are for a default minimal installation with no optional components enabled. If your machines have at least 8 cores and 16 GB of memory, it is recommended that you enable all components.
Dependency requirements
KubeKey can install Kubernetes and KubeSphere together. The dependencies that need to be installed may differ depending on the Kubernetes version to be installed. You can refer to the list below to see whether you need to install the relevant dependencies on your nodes in advance.
Dependency | Kubernetes Version ≥ 1.18 | Kubernetes Version < 1.18
---|---|---
socat | Required | Optional but recommended
conntrack | Required | Optional but recommended
ebtables | Optional but recommended | Optional but recommended
ipset | Optional but recommended | Optional but recommended
Developed in Go, KubeKey is a brand-new installation tool that replaces the Ansible-based installer used previously. KubeKey gives users flexible installation choices, as they can install KubeSphere and Kubernetes separately or both at once, which is convenient and efficient.
yum install -y socat
yum install -y conntrack-tools
Note: if these tools are not installed, the installation will report an error.
Network and DNS requirements
- Make sure the DNS server addresses in /etc/resolv.conf are available; otherwise the cluster may run into DNS problems.
- If your network configuration uses a firewall or security groups, you must make sure the infrastructure components can communicate with each other through specific ports. It is recommended that you disable the firewall or follow the "network access" guide.
Tip
- It is recommended that your operating system be clean (with no other software installed); otherwise conflicts may occur.
Step 2: Download KubeKey
Download KubeKey with the following command:
wget -c https://kubesphere.io/download/kubekey-v1.0.0-linux-amd64.tar.gz -O - | tar -xz
Make kk executable:
chmod +x kk
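Optionally confirm the binary runs (kk's version subcommand, assuming your build includes it):
./kk version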
Step 3: Start the installation
In this quick-start tutorial, you only need to run one command to install; its template is as follows:
./kk create cluster [--with-kubernetes version] [--with-kubesphere version]
Create a Kubernetes cluster with KubeSphere installed. Here is an example for your reference:
./kk create cluster --with-kubernetes v1.18.6 --with-kubesphere v3.0.0
Install via a configuration file
./kk create cluster -f config-sample.yaml
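If config-sample.yaml does not exist yet, you can generate it first with kk's create config subcommand, using the same flags as in the "add nodes" section later (config-sample.yaml is KubeKey's default output name):
./kk create config --with-kubesphere --with-kubernetes v1.18.6
The generated file looks like this: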
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
name: example
spec:
hosts:
- {name: node1, address: 172.16.0.2, internalAddress: 172.16.0.2, port: 8022, user: ubuntu, password: Qcloud@123} # Assume that the default port for SSH is 22, otherwise add the port number after the IP address as above
- {name: node2, address: 172.16.0.3, internalAddress: 172.16.0.3, password: Qcloud@123} # the default root user
- {name: node3, address: 172.16.0.4, internalAddress: 172.16.0.4, privateKeyPath: "~/.ssh/id_rsa"} # password-less login with SSH keys
roleGroups:
etcd:
- node1
master:
- node1
- node[2:10] # the nodes from node2, node3,..., to node10
worker:
- node1
- node[10:100]
controlPlaneEndpoint:
domain: lb.kubesphere.local
address: ""
port: "6443"
kubernetes:
version: v1.17.9
imageRepo: kubesphere
clusterName: cluster.local
masqueradeAll: false # masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. [Default: false]
maxPods: 110 # maxPods is the number of pods that can run on this Kubelet. [Default: 110]
nodeCidrMaskSize: 24 # internal network node size allocation. This is the size allocated to each node on your network. [Default: 24]
proxyMode: ipvs # mode specifies which proxy mode to use. [Default: ipvs]
network:
plugin: calico
calico:
ipipMode: Always # IPIP Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, vxlanMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Always]
vxlanMode: Never # VXLAN Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, ipipMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Never]
vethMTU: 1440 # The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. [Default: 1440]
kubePodsCIDR: 10.233.64.0/18
kubeServiceCIDR: 10.233.0.0/18
registry:
registryMirrors: []
insecureRegistries: []
privateRegistry: ""
addons: []
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
name: ks-installer
namespace: kubesphere-system
labels:
version: v3.0.0
spec:
local_registry: ""
persistence:
storageClass: ""
authentication:
jwtSecret: ""
etcd:
monitoring: true # Whether to install etcd monitoring dashboard
endpointIps: 192.168.0.7,192.168.0.8,192.168.0.9 # etcd cluster endpointIps
port: 2379 # etcd port
tlsEnable: true
common:
mysqlVolumeSize: 20Gi # MySQL PVC size
minioVolumeSize: 20Gi # Minio PVC size
etcdVolumeSize: 20Gi # etcd PVC size
openldapVolumeSize: 2Gi # openldap PVC size
redisVolumSize: 2Gi # Redis PVC size
es: # Storage backend for logging, tracing, events and auditing.
elasticsearchMasterReplicas: 1 # total number of master nodes; an even number is not allowed
elasticsearchDataReplicas: 1 # total number of data nodes
elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
elkPrefix: logstash # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log
# externalElasticsearchUrl:
# externalElasticsearchPort:
console:
enableMultiLogin: false # enable/disable multiple sign-on; it allows an account to be used by different users at the same time.
port: 30880
alerting: # Whether to install KubeSphere alerting system. It enables Users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from.
enabled: false
auditing: # Whether to install the KubeSphere audit log system. It provides a security-relevant chronological set of records, recording the sequence of activities that happened on the platform, initiated by different tenants.
enabled: false
devops: # Whether to install KubeSphere DevOps System. It provides out-of-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image
enabled: false
jenkinsMemoryLim: 2Gi # Jenkins memory limit
jenkinsMemoryReq: 1500Mi # Jenkins memory request
jenkinsVolumeSize: 8Gi # Jenkins volume size
jenkinsJavaOpts_Xms: 512m # The following three fields are JVM parameters
jenkinsJavaOpts_Xmx: 512m
jenkinsJavaOpts_MaxRAM: 2g
events: # Whether to install KubeSphere events system. It provides a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters.
enabled: false
logging: # Whether to install KubeSphere logging system. Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
enabled: false
logsidecarReplicas: 2
metrics_server: # Whether to install metrics-server. It enables HPA (Horizontal Pod Autoscaler).
enabled: true
monitoring: #
prometheusReplicas: 1 # Prometheus replicas are responsible for monitoring different segments of data source and provide high availability as well.
prometheusMemoryRequest: 400Mi # Prometheus request memory
prometheusVolumeSize: 20Gi # Prometheus PVC size
alertmanagerReplicas: 1 # AlertManager Replicas
multicluster:
clusterRole: none # host | member | none # You can install a solo cluster, or specify it as the role of host or member cluster
networkpolicy: # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods).
enabled: false
notification: # Email Notification support for the legacy alerting system, should be enabled/disabled together with the above alerting option
enabled: false
openpitrix: # Whether to install KubeSphere Application Store. It provides an application store for Helm-based applications and offers application lifecycle management.
enabled: false
servicemesh: # Whether to install KubeSphere Service Mesh (Istio-based). It provides fine-grained traffic management, observability and tracing, and offers visualization of the traffic topology.
enabled: false
Here is the configuration I used for my installation:
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
name: k8sinstall
spec:
hosts:
- {name: node1, address: 10.213.120.72, internalAddress: 10.213.120.72, password: uio##465 }
- {name: node2, address: 10.213.120.71, internalAddress: 10.213.120.71, password: uio##465 }
- {name: node3, address: 10.213.120.70, internalAddress: 10.213.120.70, password: uio##465 }
- {name: node4, address: 10.213.120.69, internalAddress: 10.213.120.69, password: uio##465 }
roleGroups:
etcd:
- node1
- node2
- node3
master:
- node1
- node[2:3] # the nodes from node2 to node3
worker:
- node1
- node2
- node3
- node4
controlPlaneEndpoint:
domain: lb.kubesphere.local
address: ""
port: "6443"
kubernetes:
version: v1.18.6
imageRepo: kubesphere
clusterName: cluster.local
masqueradeAll: false # masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. [Default: false]
maxPods: 110 # maxPods is the number of pods that can run on this Kubelet. [Default: 110]
nodeCidrMaskSize: 24 # internal network node size allocation. This is the size allocated to each node on your network. [Default: 24]
proxyMode: ipvs # mode specifies which proxy mode to use. [Default: ipvs]
network:
plugin: calico
calico:
ipipMode: Always # IPIP Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, vxlanMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Always]
vxlanMode: Never # VXLAN Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, ipipMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Never]
vethMTU: 1440 # The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. [Default: 1440]
kubePodsCIDR: 10.233.64.0/18
kubeServiceCIDR: 10.233.0.0/18
registry:
registryMirrors: []
insecureRegistries: []
privateRegistry: ""
addons: []
Note
- Supported Kubernetes versions: v1.15.12, v1.16.13, v1.17.9 (default), v1.18.6.
- For an all-in-one installation, you generally do not need to change any configuration.
- By default, KubeKey installs OpenEBS to provision LocalPV for development and testing environments, which is convenient for new users. For other storage classes, see the persistent storage configuration.
After you run this command, you will see a table like the one below for the environment check.
Make sure the components marked in the table are installed, then type yes to continue.
Step 4: Verify the installation
./kk create cluster -f config-sample.yaml
When you see output like the banner below, the installation is complete.
Run the following command to check the result:
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
The output shows the IP address and port number of the web console, which is exposed via NodePort 30880 by default. You can now access the console at EIP:30880 with the default account and password (admin/P@88w0rd).
#####################################################
### Welcome to KubeSphere! ###
#####################################################
Console: http://192.168.0.2:30880
Account: admin
Password: P@88w0rd
NOTES:
1. After logging into the console, please check the
monitoring status of service components in
the "Cluster Management". If any service is not
ready, please wait patiently until all components
are ready.
2. Please modify the default password after login.
#####################################################
https://kubesphere.io 20xx-xx-xx xx:xx:xx
#####################################################
Note
You may need to bind an EIP and configure port forwarding in your environment so that external users can access the console. Also make sure port 30880 is open in your security group.
After logging in to the console, you can check the status of the different components under Components. If you want to use the related services, you may need to wait for some components to be up and running. You can also use kubectl get pod --all-namespaces to check the running status of KubeSphere workloads.
Add nodes
After using KubeSphere for a while, you may need to scale the cluster to handle more and more workloads. In that case, KubeSphere provides a way to add new nodes to the cluster.
Create a configuration file with KubeKey:
./kk create config --with-kubesphere --with-kubernetes v1.18.6
The following demonstrates how to add new nodes (node5 and node6 here) using the root user as an example. Assume the hostname of the first machine is master7 (replace the hostnames below with your own).
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
name: k8sinstall
spec:
hosts:
- {name: node1, address: 10.213.120.72, internalAddress: 10.213.120.72, password: uio##465 }
- {name: node2, address: 10.213.120.71, internalAddress: 10.213.120.71, password: uio##465 }
- {name: node3, address: 10.213.120.70, internalAddress: 10.213.120.70, password: uio##465 }
- {name: node4, address: 10.213.120.69, internalAddress: 10.213.120.69, password: uio##465 }
- {name: node5, address: 10.213.120.77, internalAddress: 10.213.120.77, password: uio##465 }
- {name: node6, address: 10.213.120.78, internalAddress: 10.213.120.78, password: uio##465 }
- {name: node7, address: 10.213.120.79, internalAddress: 10.213.120.79, password: uio##465 }
roleGroups:
etcd:
- node1
- node2
- node3
master:
- node1
- node[2:3] # the nodes from node2 to node3
worker:
- node1
- node2
- node3
- node4
- node5
- node6
- node7
controlPlaneEndpoint:
domain: lb.kubesphere.local
address: ""
port: "6443"
kubernetes:
version: v1.18.6
imageRepo: kubesphere
clusterName: cluster.local
masqueradeAll: false # masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. [Default: false]
maxPods: 110 # maxPods is the number of pods that can run on this Kubelet. [Default: 110]
nodeCidrMaskSize: 24 # internal network node size allocation. This is the size allocated to each node on your network. [Default: 24]
proxyMode: ipvs # mode specifies which proxy mode to use. [Default: ipvs]
network:
plugin: calico
calico:
ipipMode: Always # IPIP Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, vxlanMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Always]
vxlanMode: Never # VXLAN Mode to use for the IPv4 POOL created at start up. If set to a value other than Never, ipipMode should be set to "Never". [Always | CrossSubnet | Never] [Default: Never]
vethMTU: 1440 # The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. [Default: 1440]
kubePodsCIDR: 10.233.64.0/18
kubeServiceCIDR: 10.233.0.0/18
registry:
registryMirrors: []
insecureRegistries: []
privateRegistry: ""
addons: []
Run the following command to apply the changes:
./kk add nodes -f config-sample.yaml
Finally, after it returns successfully, you will be able to see the new nodes and their information in the KubeSphere console. Select "Cluster Nodes" under "Nodes" from the left menu, or use kubectl get node to check the change:
kubectl get node
NAME STATUS ROLES AGE VERSION
master1 Ready master,worker 20d v1.17.9
node1 Ready worker 31h v1.17.9
node2 Ready worker 31h v1.17.9
Delete nodes
You can delete a node with the following command:
./kk delete node <nodeName> -f config-sample.yaml
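Before deleting a node, you can optionally drain it first with standard kubectl so its workloads are evicted gracefully (a precaution; substitute your node name):
kubectl drain <nodeName> --ignore-daemonsets --delete-local-data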
Delete the cluster
./kk delete cluster
If you started in advanced mode (installed with a configuration file), delete it with:
./kk delete cluster [-f config-sample.yaml]
Enable pluggable components (optional)
By default, the guide above performs only a minimal installation.
Enable the App Store
Enable it by editing the configuration file:
vi cluster-configuration.yaml
In the file, change enabled under openpitrix to true:
openpitrix:
enabled: true # Change "false" to "true"
Apply the change:
kubectl apply -f cluster-configuration.yaml
Alternatively, enable the App Store via kubectl:
kubectl -n kubesphere-system edit cc ks-installer
After the installer configuration opens, enable the App Store:
openpitrix:
enabled: true # Change "false" to "true"
Enable DevOps
Enable it by editing the configuration file:
vi cluster-configuration.yaml
In the file, change enabled under devops to true:
devops:
enabled: true # Change "false" to "true"
Apply the change:
kubectl apply -f cluster-configuration.yaml
Alternatively, enable it via kubectl:
kubectl -n kubesphere-system edit cc ks-installer
After the installer configuration opens, enable DevOps:
devops:
enabled: true # Change "false" to "true"
Check the installation logs:
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
Enable Logging
Enable it by editing the configuration file:
vi cluster-configuration.yaml
In the file, change enabled under logging to true:
logging:
enabled: true # Change "false" to "true"
Apply the change:
kubectl apply -f cluster-configuration.yaml
Alternatively, enable it via kubectl; with this method, the change is applied automatically once you save:
kubectl -n kubesphere-system edit cc ks-installer
After the installer configuration opens, enable it:
logging:
enabled: true # Change "false" to "true"
By default, if logging is enabled, ks-installer installs Elasticsearch internally. For a production environment, if you want to enable logging, it is strongly recommended to set the following values in cluster-configuration.yaml, especially externalElasticsearchUrl and externalElasticsearchPort:
es: # Storage backend for logging, tracing, events and auditing.
elasticsearchMasterReplicas: 1 # total number of master nodes; an even number is not allowed
elasticsearchDataReplicas: 1 # total number of data nodes
elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
elkPrefix: logstash # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log
externalElasticsearchUrl: # The URL of external Elasticsearch
externalElasticsearchPort: # The port of external Elasticsearch
Enable Service Mesh
Enable it by editing the configuration file:
vi cluster-configuration.yaml
Alternatively, enable it via kubectl; the change is applied automatically once you save:
kubectl -n kubesphere-system edit cc ks-installer
In the file, change enabled under servicemesh to true:
servicemesh:
enabled: true # Change "false" to "true"
If you edited cluster-configuration.yaml, apply the change:
kubectl apply -f cluster-configuration.yaml
Enable Alerting and Notification
Enable them by editing the configuration file:
vi cluster-configuration.yaml
Alternatively, enable them via kubectl; the change is applied automatically once you save:
kubectl -n kubesphere-system edit cc ks-installer
In the file, change enabled under alerting and notification to true:
alerting:
enabled: true # Change "false" to "true"
notification:
enabled: true # Change "false" to "true"
If you edited cluster-configuration.yaml, apply the change:
kubectl apply -f cluster-configuration.yaml
Enable Network Policy
Enable it by editing the configuration file:
vi cluster-configuration.yaml
Alternatively, enable it via kubectl; the change is applied automatically once you save:
kubectl -n kubesphere-system edit cc ks-installer
In the file, change enabled under networkpolicy to true:
networkpolicy:
enabled: true # Change "false" to "true"
If you edited cluster-configuration.yaml, apply the change:
kubectl apply -f cluster-configuration.yaml
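After enabling any component, you can watch the ks-installer progress with the same log command shown earlier, then confirm the new workloads come up (standard kubectl; the grep filters out healthy pods):
kubectl get pod --all-namespaces | grep -v Running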