first commit
56
CloudNative/Docker/Docker启动异常处理.md
Normal file
|
@ -0,0 +1,56 @@
|
|||
**Docker fails to start with overlay2**

The Docker configuration is as follows:

```bash
cat /etc/docker/daemon.json
```

```json
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "storage-driver": "overlay2",
    "storage-opts": [
        "overlay2.override_kernel_check=true"
    ],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m",
        "max-file": "3"
    }
}
```

The error messages are:

```
Mar 04 18:16:16 harbor.scm.360.back kernel: overlayfs: upper fs needs to support d_type.
Mar 04 18:16:16 harbor.scm.360.back dockerd[3726]: failed to start daemon: error initializing graphdriver: overlay2: the backing xfs filesystem is formatted without d_type support, which leads to incorrect behavior. Reformat the filesystem with ftype=1 to enable d_type support. Backing filesystems without d_type support are not supported.
Mar 04 18:16:16 harbor.scm.360.back systemd[1]: docker.service: main process exited, code=exited, status=1/FAILURE
Mar 04 18:16:16 harbor.scm.360.back systemd[1]: Failed to start Docker Application Container Engine.
-- Subject: Unit docker.service has failed
-- Defined-By: systemd
-- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel
--
-- Unit docker.service has failed.
--
-- The result is failed.
```

Running

```
xfs_info /
```

shows

**ftype=0**

in its output. Unless the disk is reformatted with ftype=1, overlay2 cannot be used on this filesystem.

**Modify the configuration file** to fall back to the devicemapper storage driver:

```json
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "storage-driver": "devicemapper",
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m",
        "max-file": "3"
    }
}
```
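
Before deciding between reformatting and switching storage drivers, it can be worth checking whether only the Docker data directory is affected. A minimal sketch, assuming /var/lib/docker sits on its own XFS volume (the device /dev/sdb1 here is hypothetical):

```bash
# Check whether the filesystem backing Docker's data directory supports d_type
xfs_info /var/lib/docker | grep ftype      # ftype=1 means overlay2 is usable

# If ftype=0 and the data lives on a dedicated disk, back it up, then
# recreate the filesystem with d_type enabled and restore the data
systemctl stop docker
umount /var/lib/docker
mkfs.xfs -f -n ftype=1 /dev/sdb1
mount /dev/sdb1 /var/lib/docker
systemctl start docker
```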
|
345
CloudNative/Docker/Docker安装配置.doc
Normal file
|
@ -0,0 +1,345 @@
|
|||
[This .doc file is a Confluence "Export to Word" dump (quoted-printable MIME/HTML); the raw markup is omitted here. The recoverable content is summarised below.]

Docker安装配置 (Docker installation and configuration)

Environment: CentOS 7.3 x64

Remove old Docker versions:
    yum -y remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux docker-engine

Install required dependencies:
    yum install -y yum-utils device-mapper-persistent-data lvm2

Add the yum repository:
    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

Install docker-ce:
    yum makecache fast
    yum install docker-ce

Enable and start Docker:
    systemctl enable docker
    systemctl start docker

Check the Docker version:
    docker version
|
BIN
CloudNative/Docker/Docker监控.docx
Normal file
2
CloudNative/Docker/Namespace和Cgroup.md
Normal file
|
@ -0,0 +1,2 @@
|
|||
A Namespace provides isolation: it restricts what an application process can see to the "world" inside that Namespace.
Cgroups provide limiting: they put an invisible wall around that "world", capping the resources it can consume.
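
A quick way to see both mechanisms from a shell — a sketch that assumes root privileges, util-linux installed, and (for the second command) cgroup v1 paths inside the container:

```bash
# Isolation: inside a new PID + mount namespace, ps only sees this shell and ps itself
unshare --fork --pid --mount-proc /bin/bash -c 'ps -ef'

# Limiting: Docker's --cpus flag is implemented with cgroups; the quota is visible
# from inside the container (falls back to the cgroup v2 file if v1 is absent)
docker run --rm --cpus=0.5 busybox sh -c \
  'cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us 2>/dev/null || cat /sys/fs/cgroup/cpu.max'
```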
|
17
CloudNative/Docker/cgroup限制cpu
Normal file
|
@ -0,0 +1,17 @@
|
|||
cgroups are the Linux mechanism for managing CPU (and other) resources; they can cap a program's resource usage very effectively. The following walks through limiting a program's CPU usage with cgroups:

1. First, start a program that burns CPU:

echo 'while True:pass'|python &

top shows this process using more than 90% CPU. Note its PID (1741 in this example).

2. Go to /sys/fs/cgroup/cpu and create a directory, e.g. test.

3. Inside test, a set of control files has been created automatically. Now run:

echo 50000 > cpu.cfs_quota_us

echo 1741 > tasks

This caps the CPU share of process 1741 at 50% (the default cpu.cfs_period_us is 100000µs, so a 50000µs quota is half of each period). Use top to confirm the setting took effect.
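
A minimal end-to-end sketch of the steps above, assuming cgroup v1 is mounted at /sys/fs/cgroup/cpu and the commands run as root (the group name "test" is illustrative):

```bash
# 1. Start a CPU-burning process and capture its PID
echo 'while True: pass' | python &
PID=$!

# 2. Create a cgroup under the cpu controller
mkdir -p /sys/fs/cgroup/cpu/test

# 3. Allow 50ms of CPU time per 100ms period (= 50% of one core)
echo 50000  > /sys/fs/cgroup/cpu/test/cpu.cfs_quota_us
echo 100000 > /sys/fs/cgroup/cpu/test/cpu.cfs_period_us

# 4. Move the process into the cgroup and verify with top
echo "$PID" > /sys/fs/cgroup/cpu/test/tasks
```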
|
148
CloudNative/Docker/docker-compose相关.md
Normal file
|
@ -0,0 +1,148 @@
|
|||
Common commands
```
docker-compose up -d nginx            Build and start the nginx container in the background

docker-compose exec nginx bash        Open a shell inside the nginx container

docker-compose down                   Stop and remove the containers, networks, etc. created by up

docker-compose ps                     List all containers

docker-compose restart nginx          Restart the nginx container

docker-compose run --no-deps --rm php-fpm php -v    Run php -v in the php-fpm service without starting linked containers; remove the container afterwards

docker-compose build nginx            Build the nginx image

docker-compose build --no-cache nginx Build the nginx image without using the cache

docker-compose logs nginx             Show the nginx logs

docker-compose logs -f nginx          Follow the nginx logs

docker-compose config -q              Validate docker-compose.yml: prints nothing when the file is valid, errors otherwise

docker-compose events --json nginx    Stream nginx container events as JSON

docker-compose pause nginx            Pause the nginx container

docker-compose unpause nginx          Unpause the nginx container

docker-compose rm nginx               Remove the container (it must be stopped first)

docker-compose stop nginx             Stop the nginx container

docker-compose start nginx            Start the nginx container
```
|
||||
|
||||
## yaml官方示例解析
|
||||
```
|
||||
version: "3.7"
|
||||
services:
|
||||
|
||||
## 使用已有镜像redis:alpine
|
||||
redis:
|
||||
image: redis:alpine
|
||||
ports:
|
||||
- "6379"
|
||||
networks:
|
||||
- frontend
|
||||
deploy:
|
||||
replicas: 2
|
||||
update_config:
|
||||
parallelism: 2
|
||||
delay: 10s
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
db:
|
||||
image: postgres:9.4
|
||||
volumes:
|
||||
- db-data:/var/lib/postgresql/data
|
||||
networks:
|
||||
- backend
|
||||
deploy:
|
||||
placement:
|
||||
constraints: [node.role == manager]
|
||||
|
||||
vote:
|
||||
image: dockersamples/examplevotingapp_vote:before
|
||||
ports:
|
||||
- "5000:80"
|
||||
networks:
|
||||
- frontend
|
||||
depends_on:
|
||||
- redis
|
||||
deploy:
|
||||
replicas: 2
|
||||
update_config:
|
||||
parallelism: 2
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
result:
|
||||
image: dockersamples/examplevotingapp_result:before
|
||||
ports:
|
||||
- "5001:80"
|
||||
networks:
|
||||
- backend
|
||||
depends_on:
|
||||
- db
|
||||
deploy:
|
||||
replicas: 1
|
||||
update_config:
|
||||
parallelism: 2
|
||||
delay: 10s
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
|
||||
worker:
|
||||
image: dockersamples/examplevotingapp_worker
|
||||
networks:
|
||||
- frontend
|
||||
- backend
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
labels: [APP=VOTING]
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
delay: 10s
|
||||
max_attempts: 3
|
||||
window: 120s
|
||||
placement:
|
||||
constraints: [node.role == manager]
|
||||
|
||||
visualizer:
|
||||
image: dockersamples/visualizer:stable
|
||||
ports:
|
||||
- "8080:8080"
|
||||
stop_grace_period: 1m30s
|
||||
volumes:
|
||||
- "/var/run/docker.sock:/var/run/docker.sock"
|
||||
deploy:
|
||||
placement:
|
||||
constraints: [node.role == manager]
|
||||
|
||||
networks:
|
||||
frontend:
|
||||
backend:
|
||||
|
||||
volumes:
|
||||
db-data:
|
||||
|
||||
```
|
||||
### Notes
#### A service can be built from a Dockerfile instead of an existing image
```
## Directory containing the Dockerfile; may be a relative or an absolute path
build: /path/to/dir
build: ./dir
build:
  context: ../
  dockerfile: path/of/Dockerfile
```
|
19
CloudNative/Docker/docker多容器共享网络.md
Normal file
|
@ -0,0 +1,19 @@
|
|||
1. Create a network
```
docker network create -d bridge mynet
```

2. Start the nginx container
```
docker run -d --name=nginx --network mynet --network-alias nginx -p 80:80 -v /usr/local/nginx/nginx.conf:/etc/nginx/nginx.conf -v /usr/local/nginx/html:/usr/share/nginx/html nginx
```

3. Start the php container
```
docker run -d --name=php --network mynet --network-alias php -v /data/www:/usr/local/nginx/html php
```

4. Ping php from the nginx container
```
docker exec -it nginx ping php
```
|
18
CloudNative/Docker/docker容器指定ip.md
Normal file
|
@ -0,0 +1,18 @@
|
|||
The default bridge network does not support assigning a fixed IP address:
```
# docker run -itd --net bridge --ip 172.17.0.10 centos:latest /bin/bash
6eb1f228cf308d1c60db30093c126acbfd0cb21d76cb448c678bab0f1a7c0df6
docker: Error response from daemon: User specified IP address is supported on user defined networks only.
```
A user-defined network is required:
```
docker network create --subnet=172.18.0.0/16 mynetwork
```
List networks:
```
docker network list
```
Start a container with a fixed IP:
```
docker run -itd --name networkTest1 --net mynetwork --ip 172.18.0.2 centos:latest /bin/bash
```
|
14
CloudNative/Docker/docker清理镜像.md
Normal file
|
@ -0,0 +1,14 @@
|
|||
```
## Remove images whose tag is <none> (dangling images)
docker rmi $(docker images -f "dangling=true" -q)
```

```
## Remove all images not used by any container
docker image prune -a --force
```

```
## Remove unused images older than 48 hours
docker image prune -a --force --filter "until=48h"
```
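
For broader housekeeping than image-only cleanup, something like the following can be used (volumes are deliberately pruned as a separate, explicit step):

```bash
# Also removes stopped containers, unused networks and the build cache
docker system prune -a --filter "until=48h"

# Volumes are not touched by the command above; prune them separately if intended
docker volume prune
```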
|
6
CloudNative/Docker/dumb-init.md
Normal file
|
@ -0,0 +1,6 @@
|
|||
In containerized environments the application is usually run directly, without an init system (systemd, sysvinit, etc.). The application is then left to handle signals and reap child processes itself, which can lead to containers that refuse to stop and to zombie processes. dumb-init emulates the essential duties of an init system to avoid these problems.
```
### Example
docker run quay.io/gravitational/debian-tall /usr/bin/dumb-init /bin/sh -c "sleep 10000"
```
Besides containers, dumb-init can also be used directly in shell scripts: making dumb-init the shell's parent process solves the problem of gracefully terminating the shell's child processes.
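
One way to make dumb-init the parent of a shell script is a shebang — a sketch, assuming dumb-init is installed at /usr/bin/dumb-init (the two background commands are placeholders):

```bash
#!/usr/bin/dumb-init /bin/sh
# dumb-init runs as PID 1 of this process tree: signals sent to the script are
# forwarded to the children below, and exited children are reaped (no zombies).
my-web-server &
my-other-process &
wait
```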
|
11
CloudNative/Docker/一些基本命令.md
Normal file
|
@ -0,0 +1,11 @@
|
|||
#### Get a container's real PID on the host
docker inspect --format '{{.State.Pid}}' containerID

#### List the volumes (IDs etc.) mounted by containers
docker volume ls

#### Show an image's build history
docker history cid

#### Show an image's build history (CREATED BY column untruncated)
docker history cid --no-trunc
|
BIN
CloudNative/Docker/构建docker应用时,push镜像失败.docx
Normal file
3
CloudNative/Docker/查看docker容器的进程id.md
Normal file
|
@ -0,0 +1,3 @@
|
|||
```
|
||||
docker inspect -f '{{.State.Pid}} {{.Id}}' $(docker ps -a -q)
|
||||
```
|
8
CloudNative/Docker/生产进入docker容器
Normal file
|
@ -0,0 +1,8 @@
|
|||
# Install util-linux on the host
yum -y install util-linux

# Get the container's PID
docker inspect --format "{{.State.Pid}}" <containerID>

# Enter the container's namespaces
nsenter --target <containerPID> --mount --uts --ipc --net --pid
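
Putting the two steps together (the container name nginx is illustrative):

```bash
PID=$(docker inspect --format '{{.State.Pid}}' nginx)
nsenter --target "$PID" --mount --uts --ipc --net --pid -- /bin/sh
```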
|
67
CloudNative/ErrorProcess/Calico异常.md
Normal file
|
@ -0,0 +1,67 @@
|
|||
Calico is misbehaving. Check the pods:
|
||||
```
|
||||
kubectl get pod -n kube-system
|
||||
```
|
||||
Output:
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
calico-kube-controllers-744cfdf676-4qqph 1/1 Running 0 31m
|
||||
calico-node-8jr59 0/1 Running 0 20s
|
||||
calico-node-cs79v 0/1 Running 0 20s
|
||||
calico-node-fkstd 0/1 Running 0 20s
|
||||
coredns-7f89b7bc75-6md7d 1/1 Running 0 53m
|
||||
coredns-7f89b7bc75-p88r5 1/1 Running 0 53m
|
||||
etcd-kubernetes-master 1/1 Running 0 53m
|
||||
kube-apiserver-kubernetes-master 1/1 Running 0 53m
|
||||
kube-controller-manager-kubernetes-master 1/1 Running 0 53m
|
||||
kube-proxy-6tfvm 1/1 Running 0 26m
|
||||
kube-proxy-mgqv2 1/1 Running 0 26m
|
||||
kube-proxy-v25vl 1/1 Running 0 53m
|
||||
kube-scheduler-kubernetes-master 1/1 Running 0 53m
|
||||
```
|
||||
Describe one of the calico-node pods:
|
||||
```
|
||||
kubectl describe pod calico-node-npjjr -n kube-system
|
||||
```
|
||||
Output:
|
||||
```
|
||||
Warning Unhealthy 72s kubelet Readiness probe failed: 2020-12-18 13:55:29.276 [INFO][120] confd/health.go 180: Number of node(s) with BGP peering established = 0
|
||||
calico/node is not ready: BIRD is not ready: BGP not established with 172.17.0.9,172.17.0.3
|
||||
Warning Unhealthy 62s kubelet Readiness probe failed: 2020-12-18 13:55:39.278 [INFO][156] confd/health.go 180: Number of node(s) with BGP peering established = 0
|
||||
calico/node is not ready: BIRD is not ready: BGP not established with 172.17.0.9,172.17.0.3
|
||||
Warning Unhealthy 52s kubelet Readiness probe failed: 2020-12-18 13:55:49.283 [INFO][189] confd/health.go 180: Number of node(s) with BGP peering established = 0
|
||||
calico/node is not ready: BIRD is not ready: BGP not established with 172.17.0.9,172.17.0.3
|
||||
Warning Unhealthy 42s kubelet Readiness probe failed: 2020-12-18 13:55:59.279 [INFO][215] confd/health.go 180: Number of node(s) with BGP peering established = 0
|
||||
calico/node is not ready: BIRD is not ready: BGP not established with 172.17.0.9,172.17.0.3
|
||||
Warning Unhealthy 32s kubelet Readiness probe failed: 2020-12-18 13:56:09.280 [INFO][249] confd/health.go 180: Number of node(s) with BGP peering established = 0
|
||||
calico/node is not ready: BIRD is not ready: BGP not established with 172.17.0.9,172.17.0.3
|
||||
Warning Unhealthy 22s kubelet Readiness probe failed: 2020-12-18 13:56:19.276 [INFO][276] confd/health.go 180: Number of node(s) with BGP peering established = 0
|
||||
calico/node is not ready: BIRD is not ready: BGP not established with 172.17.0.9,172.17.0.3
|
||||
Warning Unhealthy 12s kubelet Readiness probe failed: 2020-12-18 13:56:29.276 [INFO][302] confd/health.go 180: Number of node(s) with BGP peering established = 0
|
||||
calico/node is not ready: BIRD is not ready: BGP not established with 172.17.0.9,172.17.0.3
|
||||
Warning Unhealthy 2s kubelet Readiness probe failed: 2020-12-18 13:56:39.272 [INFO][335] confd/health.go 180: Number of node(s) with BGP peering established = 0
|
||||
calico/node is not ready: BIRD is not ready: BGP not established with 172.17.0.9,172.17.0.3
|
||||
```
|
||||
Fix: modify the calico.yaml manifest
```
/*
Adjust the Calico network plugin's interface-detection mechanism by setting IP_AUTODETECTION_METHOD. In the official manifest this value is not set, so it defaults to first-found, which can register the IP of a broken interface as the node IP and break the node-to-node mesh. Switch to the can-reach or interface strategy so that a correct, reachable IP is chosen.
*/

// Add the following two lines to calico.yaml
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens.*"   # adjust "ens" to match the actual interface name prefix

// Resulting configuration
- name: CLUSTER_TYPE
  value: "k8s,bgp"
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens.*"
  # or: value: "interface=ens160"
# Auto-detect the BGP IP address.
- name: IP
  value: "autodetect"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
  value: "Always"
```
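
If the interface names are not uniform across nodes, the can-reach strategy mentioned above is an alternative. A sketch (the probe address is arbitrary — any stably reachable IP or domain works):

```yaml
- name: IP_AUTODETECTION_METHOD
  value: "can-reach=8.8.8.8"
```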
|
12
CloudNative/ErrorProcess/强制删除异常资源.md
Normal file
|
@ -0,0 +1,12 @@
|
|||
## 1. Force-delete a Pod
```
kubectl delete pod -n <namespace> <podname> --force --grace-period=0
```
## 2. Force-delete a PV
```
kubectl patch pv <pvname> -p '{"metadata":{"finalizers":null}}'
```
## 3. Force-delete a PVC
```
kubectl patch pvc <pvcname> -n <namespace> -p '{"metadata":{"finalizers":null}}'
```
|
19
CloudNative/ErrorProcess/无法初始化k8s集群.md
Normal file
|
@ -0,0 +1,19 @@
|
|||
Cluster initialization fails with the following error:
```
error execution phase preflight: [preflight] Some fatal errors occurred:
	[ERROR CRI]: container runtime is not running: output: E0526 20:04:52.510582   13459 remote_runtime.go:925] "Status from runtime service failed" err="rpc error: code = Unimplemented desc = unknown service runtime.v1alpha2.RuntimeService"
```
Fix:
```
rm /etc/containerd/config.toml
systemctl restart containerd
```
or
```
cat > /etc/containerd/config.toml <<EOF
[plugins."io.containerd.grpc.v1.cri"]
  systemd_cgroup = true
EOF
systemctl restart containerd

```
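
A quick way to confirm the runtime is reachable again before re-running kubeadm init, assuming containerd's default socket path:

```bash
crictl --runtime-endpoint unix:///run/containerd/containerd.sock info
```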
|
22
CloudNative/ErrorProcess/解决Terminating状态的Pod删不掉的问题.md
Normal file
|
@ -0,0 +1,22 @@
|
|||
#### 1. Problem
1. A Pod on the Kubernetes cluster was stuck in the Terminating state:
```
[ec2-user@k8s-master01 ~]$ kubectl get pod -n infra
NAME                       READY   STATUS        RESTARTS     AGE
jenkins-5c54cf5557-nz4l2   1/1     Terminating   2 (8d ago)   14d
```
2. A normal delete did not remove it:
```
[ec2-user@k8s-master01 ~]$ kubectl delete pod -n infra jenkins-5c54cf5557-nz4l2
pod "jenkins-5c54cf5557-nz4l2" deleted

```
#### 2. Solution
1. Regardless of how the pod was created, it can be force-deleted with:
```
kubectl delete pods <pod> --grace-period=0 --force
```
2. For the pod above, the command is:
```
kubectl delete pod -n infra jenkins-5c54cf5557-nz4l2 --grace-period=0 --force
```
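
If even the forced delete hangs, a finalizer on the Pod is usually the culprit. A sketch of checking and clearing it (same pod as above):

```bash
kubectl get pod -n infra jenkins-5c54cf5557-nz4l2 -o jsonpath='{.metadata.finalizers}'
kubectl patch pod -n infra jenkins-5c54cf5557-nz4l2 -p '{"metadata":{"finalizers":null}}'
```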
|
3
CloudNative/ErrorProcess/解决用文件创建cm格式混乱问题.md
Normal file
|
@ -0,0 +1,3 @@
|
|||
```
|
||||
kubectl get cm [YOUR CONFIGMAP NAME] -o yaml | sed -E 's/[[:space:]]+\\n/\\n/g' | kubectl apply -f -
|
||||
```
|
103
CloudNative/ErrorProcess/记一次挖矿程序删除处理.md
Normal file
|
@ -0,0 +1,103 @@
|
|||
#### Symptoms
> **CPU and memory usage on the server spiked, and the server became sluggish to operate**

##### Check the load and find the offending PID
```
top
```
Output (unrelated rows trimmed):
|
||||
```
|
||||
top - 15:52:08 up 13 days, 6:21, 3 users, load average: 3.52, 3.23, 3.04
|
||||
Tasks: 226 total, 1 running, 225 sleeping, 0 stopped, 0 zombie
|
||||
%Cpu(s): 1.5 us, 0.5 sy, 98.0 ni, 0.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
|
||||
KiB Mem : 8173400 total, 50392 free, 7783940 used, 339068 buff/cache
|
||||
KiB Swap: 0 total, 0 free, 0 used. 146592 avail Mem
|
||||
|
||||
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
|
||||
25586 root 20 0 2439064 2.289g 4 S 190.1 29.4 120:20.64 server
|
||||
```
|
||||
Press "c" in top to show the full COMMAND column:
|
||||
```
|
||||
top - 15:52:23 up 13 days, 6:22, 3 users, load average: 3.72, 3.28, 3.06
|
||||
Tasks: 227 total, 1 running, 226 sleeping, 0 stopped, 0 zombie
|
||||
%Cpu(s): 3.0 us, 1.3 sy, 95.7 ni, 0.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
|
||||
KiB Mem : 8173400 total, 47780 free, 7787388 used, 338232 buff/cache
|
||||
KiB Swap: 0 total, 0 free, 0 used. 142808 avail Mem
|
||||
|
||||
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
|
||||
25586 root 20 0 2439064 2.289g 4 S 186.1 29.4 120:47.58 /opt/server
|
||||
```
|
||||
From the output above, the process's executable is /opt/server and its PID is 25586.
#### Inspect the executable
/opt/server does not exist, and the exe link under /proc/25586/ is missing as well.
#### Check for suspicious cron jobs
```
cat /etc/passwd | awk -F: '{print $1}' | xargs -I {} crontab -l -u {}
```
No cron jobs are defined for any user.
#### Check /tmp for suspicious files or directories
/tmp looks normal.
|
||||
#### Find the parent process
```
ps -ef | grep server
```
Output:
```
root     25586  1793 99 14:45 ?        02:08:41 /opt/server
```
The parent PID is 1793.
#### Inspect the parent process
|
||||
```
ll /proc/1793/
```
Its exe link points to:
```
lrwxrwxrwx 1 root root 0 Jan 11 15:56 exe -> /bin/busybox*
```
busybox is not used on this server directly, but it is common inside Docker images.
#### Check the Docker containers
```
docker ps -a
```
Output:
```
CONTAINER ID        IMAGE           COMMAND      CREATED      STATUS                PORTS    NAMES
169486212d4b        zqbxacdsx       "#(nop)"     3 days ago   Up 3 days (healthy)            harbor-jobservice
```
Sure enough, there is a suspicious container, built from an image named zqbxacdsx.
|
||||
#### Get the image ID
```
docker images
```
The image ID is aa05538acecf.
#### Inspect how the image was built
```
## aa05538acecf is the image ID
docker history aa05538acecf --no-trunc
```
The image only adds a single script, main.sh.
|
||||
#### Examine main.sh
Enter the container:
```
## 169486212d4b is the container ID
docker exec -it 169486212d4b /bin/sh
```
Read main.sh:
```
cat main.sh
```
As expected, main.sh is a script that downloads and runs a mining program.
|
||||
#### Stop the mining container
```
docker stop 169486212d4b
```
#### Remove the mining container
```
docker rm 169486212d4b
```
#### Remove the mining image
```
docker rmi aa05538acecf
```
After monitoring for a while, the rogue process did not come back and the server ran normally.
That completes the removal of the mining malware. Next steps: close unneeded ports on the firewall and harden the Docker configuration.
|
BIN
CloudNative/Kubernetes/Base/CNI选择.bmp
Normal file
After Width: | Height: | Size: 4 MiB |
32
CloudNative/Kubernetes/Base/CRD.md
Normal file
|
@ -0,0 +1,32 @@
|
|||
```
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
# name must match the spec fields below, and be in the form: <plural>.<group>
|
||||
name: crontabs.stable.example.com
|
||||
spec:
|
||||
# group name to use for REST API: /apis/<group>/<version>
|
||||
group: stable.example.com
|
||||
# versions to use for REST API: /apis/<group>/<version>
|
||||
versions:
|
||||
- name: v1beta1
|
||||
# Each version can be enabled/disabled by Served flag.
|
||||
served: true
|
||||
# One and only one version must be marked as the storage version.
|
||||
storage: true
|
||||
- name: v1
|
||||
served: true
|
||||
storage: false
|
||||
# either Namespaced or Cluster
|
||||
scope: Namespaced
|
||||
names:
|
||||
# plural name to be used in the URL: /apis/<group>/<version>/<plural>
|
||||
plural: crontabs
|
||||
# singular name to be used as an alias on the CLI and for display
|
||||
singular: crontab
|
||||
# kind is normally the CamelCased singular type. Your resource manifests use this.
|
||||
kind: CronTab
|
||||
# shortNames allow shorter string to match your resource on the CLI
|
||||
shortNames:
|
||||
- ct
|
||||
```
|
6
CloudNative/Kubernetes/Base/Calico使用BGP模式网络.md
Normal file
|
@ -0,0 +1,6 @@
|
|||
The official Calico manifest uses IPIP mode by default. To switch to BGP, change the following setting:
```yaml
# Disable IPIP: change Always to Never
|
||||
- name: CALICO_IPV4POOL_IPIP
|
||||
value: "Never"
|
||||
```
|
168
CloudNative/Kubernetes/Base/ConfigMap.md
Normal file
|
@ -0,0 +1,168 @@
|
|||
### Ways to create a ConfigMap:
```
pass literal key=value pairs on the command line with --from-literal
create it from a single file with --from-file=<file>
create it from a directory (every file in it becomes an entry) with --from-file=<directory>
write a standard ConfigMap YAML manifest and create it with kubectl create -f
```
|
||||
|
||||
#### 命令行指定参数创建
|
||||
```
|
||||
kubectl create configmap demo_config --from-literal=name=demo --from-literal=version=v1
|
||||
```
|
||||
|
||||
#### 指定文件创建
|
||||
```
|
||||
kubectl create configmap demo_config --from-file=k1=/path/to/file1 --from-file=k2=/path/to/file2
|
||||
```
|
||||
|
||||
#### 指定目录创建(以目录下的文件名为键,内容为值)
|
||||
只识别文件,忽略目录下的子目录
|
||||
```
|
||||
kubectl create configmap demo_config --from-file=k1=/path1 --from-file=/path2
|
||||
```
|
||||
|
||||
#### 以yaml文件创建
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
|
||||
metadata:
|
||||
name: demo_config
|
||||
namespace: demo-ns
|
||||
|
||||
labels:
|
||||
name: demo
|
||||
version: v1
|
||||
|
||||
data:
|
||||
name: demo
|
||||
version: v1
|
||||
```
|
||||
|
||||
### Using a ConfigMap
```
1. As environment variables passed directly to the pod
   referencing a single key from the ConfigMap
   referencing all keys from the ConfigMap
2. On the container's command line (in the startup command)
3. Mounted into the pod as a volume
```
|
||||
|
||||
#### Referencing a single key with valueFrom / configMapKeyRef
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
|
||||
metadata:
|
||||
name: pod-demo1
|
||||
namespace: demo-ns
|
||||
|
||||
labels:
|
||||
name: pod-demo1
|
||||
version: v1
|
||||
spec:
|
||||
containers:
|
||||
- name: demo
|
||||
image: demo1
|
||||
command:
|
||||
- "/bin/sh"
|
||||
- "-c"
|
||||
- "echo hello demo"
|
||||
|
||||
ports:
|
||||
- name: demo-http
|
||||
containerPort: 80
|
||||
protocol:TCP
|
||||
|
||||
env:
|
||||
valueFrom:
|
||||
- name: CONFIG_ENV
|
||||
configMapKeyRef:
|
||||
name: demo_config
|
||||
key: name
|
||||
```
|
||||
|
||||
#### Referencing all keys of a ConfigMap with envFrom / configMapRef
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
|
||||
metadata:
|
||||
name: pod-demo1
|
||||
namespace: demo-ns
|
||||
|
||||
labels:
|
||||
name: pod-demo1
|
||||
version: v1
|
||||
spec:
|
||||
containers:
|
||||
- name: demo
|
||||
image: demo1
|
||||
command:
|
||||
- "/bin/sh"
|
||||
- "-c"
|
||||
- "echo hello demo"
|
||||
|
||||
ports:
|
||||
- name: demo-http
|
||||
containerPort: 80
|
||||
protocol:TCP
|
||||
|
||||
envFrom:
|
||||
- configMapRef:
|
||||
name: demo_config
|
||||
```
|
||||
|
||||
#### To use values on the command line, expose them as environment variables first, then reference them as $(VAR_NAME) in the container's startup command
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: dapi-test-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: test-container
|
||||
image: k8s.gcr.io/busybox
|
||||
command: [ "/bin/sh", "-c", "echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ]
|
||||
env:
|
||||
- name: SPECIAL_LEVEL_KEY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: special-config
|
||||
key: SPECIAL_LEVEL
|
||||
- name: SPECIAL_TYPE_KEY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: special-config
|
||||
key: SPECIAL_TYPE
|
||||
restartPolicy: Never
|
||||
```
|
||||
|
||||
#### Mounting as a volume
##### Each key becomes a file name and its value the file contents
|
||||
```
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-configmap
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx-configmap
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx-configmap
|
||||
image: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
volumeMounts:
|
||||
- name: config-volume4
|
||||
mountPath: /tmp/config4
|
||||
volumes:
|
||||
- name: config-volume4
|
||||
configMap:
|
||||
name: test-config4
|
||||
```
|
25
CloudNative/Kubernetes/Base/CronJob.md
Normal file
|
@ -0,0 +1,25 @@
|
|||
```
|
||||
apiVersion: batch/v1beta1
|
||||
kind: CronJob
|
||||
|
||||
metadata:
|
||||
name: hello
|
||||
|
||||
spec:
|
||||
schedule: "*/1 * * * *" ## 每分钟执行一次
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: hello
|
||||
image: busybox
|
||||
args:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- date; echo hello
|
||||
restartPolicy: OnFailure
|
||||
startingDeadlineSeconds: 10 ## job最长启动时间
|
||||
concurrencyPolicy: Allow ## 是否允许并行运行
|
||||
successfulJobsHistoryLimit: 3 ## 允许留存历史job的个数
|
||||
```
|
10
CloudNative/Kubernetes/Base/DNS.md
Normal file
|
@ -0,0 +1,10 @@
|
|||
```
Service
  A record: my-svc.my-namespace.svc.cluster.local; how it resolves depends on the Service type
    a normal Service resolves to its Cluster IP
    a Headless Service resolves to the list of backing Pod IPs
  SRV record: _my-port-name._my-port-protocol.my-svc.my-namespace.svc.cluster.local
Pod
  A record: pod-ip-address.my-namespace.pod.cluster.local
  with hostname and subdomain set: hostname.custom-subdomain.default.svc.cluster.local
```
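
A quick way to exercise these records from inside the cluster (busybox:1.28 is used because its nslookup behaves well for this test; the Service name is illustrative):

```bash
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- \
  nslookup my-svc.my-namespace.svc.cluster.local
```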
|
69
CloudNative/Kubernetes/Base/DownwardAPI.md
Normal file
|
@ -0,0 +1,69 @@
|
|||
The Downward API lets containers in a Pod read information about the Pod's own API object directly, reducing their coupling to Kubernetes.
|
||||
```
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test-downwardapi-volume
|
||||
labels:
|
||||
zone: us-est-coast
|
||||
cluster: test-cluster1
|
||||
rack: rack-22
|
||||
spec:
|
||||
containers:
|
||||
- name: client-container
|
||||
image: k8s.gcr.io/busybox
|
||||
command: ["sh", "-c"]
|
||||
args:
|
||||
- while true; do
|
||||
if [[ -e /etc/podinfo/labels ]]; then
|
||||
echo -en '\n\n'; cat /etc/podinfo/labels; fi;
|
||||
sleep 5;
|
||||
done;
|
||||
volumeMounts:
|
||||
- name: podinfo
|
||||
mountPath: /etc/podinfo
|
||||
readOnly: false
|
||||
volumes:
|
||||
- name: podinfo
|
||||
projected:
|
||||
sources:
|
||||
- downwardAPI:
|
||||
items:
|
||||
- path: "labels"
|
||||
fieldRef:
|
||||
fieldPath: metadata.labels
|
||||
```
|
||||
In this Pod's YAML file, a simple container is defined together with a projected Volume whose data source is the Downward API. The Downward API volume declares that the Pod's metadata.labels should be exposed to the container.
With this declaration, the Pod's Labels field is automatically mounted into the container as the file /etc/podinfo/labels. The container's startup command keeps printing the contents of /etc/podinfo/labels, so after creating the Pod the labels can be read with kubectl logs:
|
||||
```
|
||||
$ kubectl create -f dapi-volume.yaml
|
||||
$ kubectl logs test-downwardapi-volume
|
||||
cluster="test-cluster1"
|
||||
rack="rack-22"
|
||||
zone="us-est-coast"
|
||||
```
|
||||
|
||||
Fields currently supported by the Downward API:
|
||||
```
1. Available via fieldRef:
  spec.nodeName                  - name of the host node
  status.hostIP                  - host node IP
  metadata.name                  - Pod name
  metadata.namespace             - Pod namespace
  status.podIP                   - Pod IP
  spec.serviceAccountName        - name of the Pod's Service Account
  metadata.uid                   - Pod UID
  metadata.labels['<KEY>']       - value of the label <KEY>
  metadata.annotations['<KEY>']  - value of the annotation <KEY>
  metadata.labels                - all of the Pod's labels
  metadata.annotations           - all of the Pod's annotations

2. Available via resourceFieldRef:
  the container's CPU limit
  the container's CPU request
  the container's memory limit
  the container's memory request
```
|
||||
|
||||
Note that the Downward API can only expose information that is already determined before the container process starts. Information that only exists once the Pod is running, such as the container process's PID, cannot come from the Downward API; for that, consider adding a sidecar container to the Pod.
|
32
CloudNative/Kubernetes/Base/Flannel相关
Normal file
|
@ -0,0 +1,32 @@
|
|||
Certificates required: the CA certificate and key, plus the flannel key and certificate.
Before installation, configure the subnet.env file to define the network flannel manages.
Example:
FLANNEL_NETWORK=172.18.0.0/16   # flannel address pool
FLANNEL_SUBNET=172.18.16.0/24   # subnet assigned to each host's docker0
FLANNEL_MTU=1500                # packet fragment size
FLANNEL_IPMASQ=false

flannel host-gw:
  the hosts must share one layer-2 network and the same gateway; routes are kept in the routing table (etcd)
  plain route forwarding, so it is the most efficient backend


flannel vxlan:
  hosts may sit on different networks; the flannel0 device encapsulates/decapsulates packets


flannel udp

flannel depends on etcd; after installation, seed etcd with the network configuration:
# etcdctl member list    # find the etcd leader

# etcdctl --endpoint http://10.1.0.0:2379 set /coreos.com/network/config '{"Network": "172.18.0.0/16", "SubnetLen":24, "Backend":{"Type": "VxLan", "Directrouting": True}}'
"Directrouting": True — forward directly (host-gw style) when hosts are on the same layer-2 network


Optimizing flannel
Install iptables-services
# delete the rule generated by default
iptables -t nat -D POSTROUTING -s <local docker subnet> ! -o docker0 -j MASQUERADE
# only SNAT traffic that leaves the flannel address pool and does not go out via docker0 (i.e. container-to-container traffic is not translated)
iptables -t nat -I POSTROUTING -s <local docker subnet> ! -d 172.18.0.0/16 ! -o docker0 -j MASQUERADE
|
18
CloudNative/Kubernetes/Base/HPA.md
Normal file
|
@ -0,0 +1,18 @@
|
|||
Horizontal Pod Autoscaling scales Pods automatically based on CPU utilization or custom metrics (supported for ReplicaSets and Deployments).
The controller queries resource usage from the metrics API every 30s (configurable with --horizontal-pod-autoscaler-sync-period).
Three metric types are supported:
  predefined metrics (e.g. Pod CPU), evaluated as a utilization ratio
  custom pod metrics, evaluated as raw values
  custom object metrics
Two query paths are supported: Heapster and a custom RESTful API.
Multiple metrics can be combined.

metrics-server must be deployed before HPA can be used.
```
## Create the pod and service
kubectl run php --image=php --requests=cpu=200m --expose --port=80

## Create the autoscaler
kubectl autoscale deployment php --cpu-percent=50 --min=1 --max=10

```
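
The same autoscaler can also be written declaratively. A sketch, assuming the Deployment above is named php:

```yaml
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: php
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: php
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 50
```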
|
21
CloudNative/Kubernetes/Base/Ingress.md
Normal file
|
@ -0,0 +1,21 @@
|
|||
An Ingress exposes Services inside the cluster to external clients; it is a collection of request-routing rules.
|
||||
```
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
|
||||
metadata:
|
||||
name: demo-ingress
|
||||
namespace: demo-ns
|
||||
labels:
|
||||
name: demo-ingress
|
||||
version: v1
|
||||
spec:
|
||||
rules:
|
||||
- host: test.demo.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: demo-svc1
|
||||
servicePort: 80
|
||||
```
|
74
CloudNative/Kubernetes/Base/MiniKube安装配置.md
Normal file
|
@ -0,0 +1,74 @@
|
|||
# 安装docker-ce
|
||||
## 安装必要的系统工具软件
|
||||
`yum install -y yum-utils device-mapper-persistent-data lvm2`
|
||||
## 添加软件源
|
||||
```
|
||||
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
|
||||
```
|
||||
## 更新源并安装docker
|
||||
```
|
||||
yum makecache fast
|
||||
yum -y install docker-ce
|
||||
```
|
||||
## 配置docker加速
|
||||
### 修改/etc/docker/daemon.json文件,添加阿里云镜像加速
|
||||
***需要创建/etc/docker目录与daemon.json文件***
|
||||
```
|
||||
{
|
||||
"registry-mirrors": ["https://yn64512p.mirror.aliyuncs.com"]
|
||||
}
|
||||
```
|
||||
### 启动docker
|
||||
```
|
||||
systemctl daemon-reload
|
||||
systemctl restart docker
|
||||
```
|
||||
# 安装MiNiKube
|
||||
## 添加用户
|
||||
```
|
||||
useradd -u 530 -g docker k8s
|
||||
```
|
||||
## 下载kubectl,并移动到/home/k8s/.bin目录中(k8s用户可直接执行),并添加到PATH中
|
||||
```
|
||||
su - k8s
|
||||
|
||||
mkdir .bin
|
||||
|
||||
cd .bin && curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
|
||||
|
||||
chmod +x kubectl
|
||||
|
||||
cat ~/.bash_profile
|
||||
...
|
||||
PATH=$PATH:~/.bin
|
||||
export PATH
|
||||
...
|
||||
|
||||
source ~/.bash_profile
|
||||
|
||||
```
|
||||
|
||||
## 下载安装MiniKube
|
||||
```
|
||||
cd /home/k8s/.bin
|
||||
|
||||
wget https://github.com/kubernetes/minikube/releases/download/v1.15.0/minikube-linux-x86_64
|
||||
```
|
||||
## 启动MiniKube
|
||||
```
|
||||
minikube start --network-plugin=cni --cni=calico
|
||||
```
|
||||
|
||||
# Pause the cluster
```
minikube pause
|
||||
```
|
||||
# 停止集群
|
||||
```
|
||||
minikube stop
|
||||
```
|
||||
# 卸载集群
|
||||
```
|
||||
minikube delete --all
|
||||
```
|
||||
|
50
CloudNative/Kubernetes/Base/Namespace/ns中单个Pod的资源限额-cpu.md
Normal file
|
@ -0,0 +1,50 @@
|
|||
### 创建命名空间
|
||||
```
|
||||
kubectl create namespace demo-ns
|
||||
```
|
||||
|
||||
### 创建LimitRange, cpu-limit.yaml
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: LimitRange
|
||||
metadata:
|
||||
name: cpu-limit-range
|
||||
spec:
|
||||
limits:
|
||||
- default:
|
||||
cpu: 1
|
||||
defaultRequest:
|
||||
cpu: 0.5
|
||||
type: Container
|
||||
```
|
||||
|
||||
### 命名空间上施加限制
|
||||
```
|
||||
kubectl apply -f cpu-limit.yaml --namespace demo-ns
|
||||
```
|
||||
|
||||
### 在该命名空间下创建pod定义,pod-limit-c-demo1.yaml
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
|
||||
metadata:
|
||||
name: pod-lc-demo1
|
||||
namespace: demo-ns
|
||||
labels:
|
||||
app: pod-lc
|
||||
version: v1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: pod-demo1-c
|
||||
image: busybox
|
||||
ports:
|
||||
- name: demo1-c-http
|
||||
containerPort: 80
|
||||
```
|
||||
|
||||
### 创建pod
|
||||
```
|
||||
kubectl apply -f pod-limit-c-demo1.yaml
|
||||
```
|
50
CloudNative/Kubernetes/Base/Namespace/ns中单个Pod的资源限额-内存.md
Normal file
|
@ -0,0 +1,50 @@
|
|||
### 创建命名空间
|
||||
```
|
||||
kubectl create namespace demo-ns
|
||||
```
|
||||
|
||||
### 创建LimitRange, memory-limit.yaml
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: LimitRange
|
||||
metadata:
|
||||
name: mem-limit-range
|
||||
spec:
|
||||
limits:
|
||||
- default:
|
||||
memory: 512Mi
|
||||
defaultRequest:
|
||||
memory: 256Mi
|
||||
type: Container
|
||||
```
|
||||
|
||||
### 命名空间上施加限制
|
||||
```
|
||||
kubectl apply -f memory-limit.yaml --namespace demo-ns
|
||||
```
|
||||
|
||||
### 在该命名空间下创建pod定义,pod-limit-m-demo1.yaml
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
|
||||
metadata:
|
||||
name: pod-lm-demo1
|
||||
namespace: demo-ns
|
||||
labels:
|
||||
app: pod-lm
|
||||
version: v1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: pod-demo1-m
|
||||
image: busybox
|
||||
ports:
|
||||
- name: demo1-m-http
|
||||
containerPort: 80
|
||||
```
|
||||
|
||||
### 创建pod
|
||||
```
|
||||
kubectl apply -f pod-limit-m-demo1.yaml
|
||||
```
|
16
CloudNative/Kubernetes/Base/Namespace/ns中所有Pod的资源限额.md
Normal file
|
@ -0,0 +1,16 @@
|
|||
```
|
||||
apiVersion: v1
|
||||
kind: ResourceQuota
|
||||
|
||||
metadata:
|
||||
name: rq-demo-ns
|
||||
namespace: demo-ns
|
||||
|
||||
spec:
|
||||
hard:
|
||||
requests.cpu: 500m
|
||||
requests.memory: 1Gi
|
||||
limits.cpu: 1000m
|
||||
limits.memory: 2Gi
|
||||
|
||||
```
|
20
CloudNative/Kubernetes/Base/PV&&PVC/PV.md
Normal file
|
@ -0,0 +1,20 @@
|
|||
```
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
|
||||
metadata:
|
||||
name: pv-demo1
|
||||
labels:
|
||||
type: local
|
||||
version: v1
|
||||
|
||||
spec:
|
||||
storageClassName: standard
|
||||
capacity:
|
||||
storage: 250Mi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
hostPath:
|
||||
path: "/tmp/data1"
|
||||
type: DirectoryOrCreate
|
||||
```
|
66
CloudNative/Kubernetes/Base/PV&&PVC/Volume.md
Normal file
|
@ -0,0 +1,66 @@
|
|||
#### emptyDir
|
||||
```
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
|
||||
metadata:
|
||||
name: redis-demo-ss
|
||||
namespace: prod
|
||||
labels:
|
||||
app: redis
|
||||
version: v1
|
||||
|
||||
spec:
|
||||
  replicas: 3
|
||||
serviceName: redis-svc
|
||||
selector:
|
||||
matchLabels:
|
||||
app: redis
|
||||
env: prod
|
||||
version: v1
|
||||
template:
|
||||
metadata:
|
||||
name: redis-pod
|
||||
labels:
|
||||
app: redis
|
||||
env: prod
|
||||
version: v1
|
||||
spec:
|
||||
      containers:
|
||||
- name: redis
|
||||
image: redis
|
||||
imagePullPolicy: Never
|
||||
ports:
|
||||
- name: redis-port
|
||||
containerPort: 6379
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: redis-volume
|
||||
mountPath: /data
|
||||
volumes:
|
||||
- name: redis-volume
|
||||
emptyDir: {}
|
||||
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
partition: 1
|
||||
|
||||
```
|
||||
|
||||
#### hostPath
```
...
      volumes:
      - name: redis-volume
        hostPath:
          path: /data
|
||||
```
|
||||
#### NFS
|
||||
```
|
||||
...
|
||||
volumes:
|
||||
- name: redis-volume
|
||||
nfs:
|
||||
          server: 192.168.11.1   ## NFS server IP or hostname
          path: "/test"          ## directory exported by the NFS server
|
||||
```
|
18
CloudNative/Kubernetes/Base/PV&&PVC/pv与pvc
Normal file
|
@ -0,0 +1,18 @@
|
|||
### Static PVs
pod.spec.volumes: the PV the pod expects
bound to
pod.spec.containers.volumeMounts: where it is mounted

## Creating a PV
PersistentVolume
Key fields:
pv.spec.capacity.storage: size; for base-1024 sizes use Gi, Mi, etc., e.g. 6Gi
pv.spec.accessModes: a list — ReadOnlyMany (read-only by many nodes), ReadWriteOnce (read-write by a single node), ReadWriteMany (read-write by many nodes)
pv.spec.persistentVolumeReclaimPolicy: what happens after the PV is released — Retain (keep) or Delete (remove)

### Dynamic PVs
The StorageClass name must match pvc.spec.storageClassName in the PersistentVolumeClaim
StorageClass.reclaimPolicy: what happens after the PV is released — Retain or Delete


CSI: Container Storage Interface
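
A minimal PVC sketch tying these fields together (the name, class and size are illustrative):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-demo1
spec:
  storageClassName: standard
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 250Mi
```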
|
1
CloudNative/Kubernetes/Base/PodPreset.md
Normal file
|
@ -0,0 +1 @@
|
|||
PodPreset is a Kubernetes API resource used to inject additional runtime information — secrets, volume mounts, environment variables, etc. — into Pods at creation time. A label selector chooses which Pods the preset applies to. The benefit is that common Pod settings can be kept as a template, so individual Pods do not need to declare everything explicitly: it simplifies Pod configuration and keeps it consistent. **Usually not enabled.**
|
25
CloudNative/Kubernetes/Base/Pod标签.md
Normal file
|
@ -0,0 +1,25 @@
|
|||
```
## List pods in the default namespace together with their labels
kubectl get pods -n default --show-labels
```
Output:
```
NAME    READY   STATUS    RESTARTS   AGE   LABELS
demo    1/1     Running   1          8d    env=test,app=demo,version=v1

```
```
## Change a label
kubectl label pod -n default demo env=pre --overwrite
```

```
## Remove a label by appending a minus sign (-) to its name
kubectl label pod -n default demo version-
```

```
## Query pods by label value(s)
kubectl get pods -n default --show-labels -l env=pre
kubectl get pods -n default --show-labels -l 'env in (pre,test)'
```
|
56
CloudNative/Kubernetes/Base/PreStop和PostStart.md
Normal file
|
@ -0,0 +1,56 @@
|
|||
#### preStop: runs before the pod is stopped (graceful shutdown)
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
|
||||
metadata:
|
||||
  name: prestop-demo-v1
|
||||
namespace: demo-ns
|
||||
labels:
|
||||
app: prestop
|
||||
version: v1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: prestop-demo
|
||||
image: busybox
|
||||
ports:
|
||||
- name: pre-port
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
lifecycle:
|
||||
preStop:
|
||||
      exec: ## or httpGet / tcpSocket
|
||||
command:
|
||||
- '/bin/sh'
|
||||
- '-c'
|
||||
- 'echo stop'
|
||||
```
|
||||
|
||||
#### postStart: runs immediately after the container starts
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
|
||||
metadata:
|
||||
  name: poststart-demo-v1
|
||||
namespace: demo-ns
|
||||
labels:
|
||||
app: poststart
|
||||
version: v1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: poststart-demo
|
||||
image: busybox
|
||||
ports:
|
||||
- name: post-port
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
|
||||
lifecycle:
|
||||
postStart:
|
||||
tcpSocket:
|
||||
port: 80
|
||||
|
||||
```
|
47
CloudNative/Kubernetes/Base/Role和ClusterRole.md
Normal file
|
@ -0,0 +1,47 @@
|
|||
```
|
||||
## A Role grants permissions within a single namespace
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
namespace: default
|
||||
name: pod-reader
|
||||
rules:
|
||||
- apiGroups: [""] #"" indicates the core API group
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
```
|
||||
|
||||
```
|
||||
## A ClusterRole is for cluster-scoped resources, non-resource APIs, or use across multiple namespaces
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
# "namespace" omitted since ClusterRoles are not namespaced
|
||||
name: secret-reader
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
```
|
||||
|
||||
```
|
||||
# RoleBinding example (referencing a Role)
|
||||
# This role binding allows "jane" to read pods in the "default" namespace.
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: read-pods
|
||||
namespace: default
|
||||
subjects:
|
||||
- kind: User
|
||||
name: jane
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: pod-reader
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
```
|
||||
|
||||
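For completeness, a ClusterRoleBinding sketch that grants the ClusterRole above cluster-wide (the group name is illustrative):

```
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: read-secrets-global
subjects:
- kind: Group
  name: manager
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: secret-reader
  apiGroup: rbac.authorization.k8s.io
```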
|
7
CloudNative/Kubernetes/Base/Traefik.md
Normal file
|
@ -0,0 +1,7 @@
|
|||
### Installation
```
## Add the repository
helm repo add traefik https://helm.traefik.io/traefik
## Install into the default namespace
helm install traefik traefik/traefik
```
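
To sanity-check the install, something like the following should work; the label selector and port 9000 follow the chart's defaults at the time of writing, so adjust them if your chart version differs:

```bash
helm status traefik
kubectl port-forward $(kubectl get pods -l app.kubernetes.io/name=traefik -o name) 9000:9000
# then open http://127.0.0.1:9000/dashboard/
```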
|
BIN
CloudNative/Kubernetes/Base/flannel-udp.png
Normal file
After Width: | Height: | Size: 103 KiB |
BIN
CloudNative/Kubernetes/Base/flannel-vxlan.png
Normal file
After Width: | Height: | Size: 106 KiB |
24
CloudNative/Kubernetes/Base/ingress配置规则
Normal file
|
@ -0,0 +1,24 @@
|
|||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
|
||||
metadata:
|
||||
name: ingress-traefic
|
||||
namespace: kube-public
|
||||
labels:
|
||||
app: ingress-demo1
|
||||
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- test.test.com
|
||||
    secretName: secret-test-com # matches the secret's metadata.name
|
||||
rules:
|
||||
- host: test.test.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
          serviceName: sv-demo1 # matches the target Service's metadata.name
|
||||
servicePort: 8080
|
||||
|
||||
|
16
CloudNative/Kubernetes/Base/job相关
Normal file
|
@ -0,0 +1,16 @@
|
|||
job.spec.backoffLimit: retry limit

job.spec.template.spec.restartPolicy: restart policy — Always, OnFailure, Never


## Parallel jobs
job.spec.completions: total number of successful completions required
job.spec.parallelism: how many pods run in parallel


## CronJob
cronjob.spec.schedule: the schedule, in the same format as crontab

cronjob.spec.startingDeadlineSeconds: latest time a job may start after its scheduled time
cronjob.spec.concurrencyPolicy: whether concurrent runs are allowed
cronjob.spec.successfulJobsHistoryLimit: how many finished jobs to keep
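
A sketch of a Job that uses the fields above (the image and command are illustrative):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  completions: 6      # total successful runs required
  parallelism: 2      # run two pods at a time
  backoffLimit: 4     # retry limit
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
```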
|
45
CloudNative/Kubernetes/Base/k8s-deployment.md
Normal file
|
@ -0,0 +1,45 @@
|
|||
```
|
||||
apiVersion: apps/v1 # kubectl explain deployment
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: deploy-demo
|
||||
namespace: default
|
||||
|
||||
spec:
|
||||
  replicas: 2
|
||||
  selector: # must match the labels in the template below
|
||||
matchLabels:
|
||||
app: myapp-demo
|
||||
release: v2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: myapp-demo
|
||||
release: v2
|
||||
spec:
|
||||
containers:
|
||||
- name: myapp
|
||||
image: xxx/myapp
|
||||
ports:
|
||||
- name: demo-http
|
||||
          containerPort: 80
|
||||
```
|
||||
## Create
kubectl apply -f deploy-demo.yaml
## List deployments
kubectl get deployment
## Show details
kubectl describe deployment deploy-demo
## Roll back
kubectl rollout undo deployment deploy-demo --to-revision=<revision>
## Pause a rollout
kubectl rollout pause deployment deploy-demo
## Resume a paused rollout
kubectl rollout resume deployment deploy-demo
## Check rollout status
kubectl rollout status deployment deploy-demo
## Show rollout history
kubectl rollout history deployment deploy-demo
## Patch (JSON format)
kubectl patch deployment deploy-demo -p '{"spec":{"replicas":5}}'
or kubectl set image deployment deploy-demo myapp=xxx/myapp:v1
|
18
CloudNative/Kubernetes/Base/k8s-ingres.md
Normal file
|
@ -0,0 +1,18 @@
|
|||
```
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: ingress-demo
|
||||
  namespace: default
  annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
spec:
|
||||
rules:
|
||||
- host: demo.test.com
|
||||
http:
|
||||
paths:
|
||||
      - path: / # defaults to /; any other path may be used
|
||||
backend:
|
||||
serviceName: svc-demo
|
||||
servicePort: 80
|
||||
```
|
28
CloudNative/Kubernetes/Base/k8s-rs.md
Normal file
|
@ -0,0 +1,28 @@
|
|||
```
|
||||
apiVersion: apps/v1 # kubectl explian rs查看
|
||||
kind: ReplicaSet
|
||||
metadata:
|
||||
name: myapp
|
||||
namespace: default
|
||||
|
||||
spec:
|
||||
replicas: 2 # 副本数
|
||||
selector: # 选择标签匹配的容器[与template中的匹配]
|
||||
matchLabels:
|
||||
app: myapp
|
||||
release: v1
|
||||
|
||||
template:
|
||||
metadata:
|
||||
name: app-demo
|
||||
labels:
|
||||
        app: myapp
|
||||
release: v1
|
||||
spec:
|
||||
      containers:
      - name: myapp-ct
        image: xxx/myapp:v1
        ports:
        - name: myapp-http
          containerPort: 80
|
||||
```
|
6
CloudNative/Kubernetes/Base/k8s-stateful.md
Normal file
|
@ -0,0 +1,6 @@
|
|||
stateful[有状态的服务]:
|
||||
headless,直达后端pod
|
||||
StatefulSet
|
||||
volumeClaimTemplates[数据卷]
|
||||
|
||||
|
45
CloudNative/Kubernetes/Base/k8s-svc.md
Normal file
|
@ -0,0 +1,45 @@
|
|||
```
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: svc-demo
|
||||
namespace: default
|
||||
spec:
|
||||
selector: # 选择器,与rs标签匹配
|
||||
app: rs-demo
|
||||
  type: ClusterIP # uses only a cluster-internal IP — this is the default; the Service is reachable only from inside the cluster
|
||||
clusterIP: 172.10.1.9
|
||||
ports:
|
||||
port: 80
|
||||
targetPort:80
|
||||
```
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: svc-demo
|
||||
namespace: default
|
||||
spec:
|
||||
selector: # 选择器,与rs标签匹配
|
||||
app: rs-demo
|
||||
  type: NodePort # in addition to the cluster IP, opens a port on every node; the Service is reachable at any <NodeIP>:NodePort
|
||||
# clusterIP: 172.10.1.9
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
```
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: svc-demo
|
||||
namespace: default
|
||||
spec:
|
||||
selector: # 选择器,与rs标签匹配
|
||||
app: rs-demo
|
||||
clusterIP: None # 无头svc,headless,一般用于statefulset等有状态服务
|
||||
# clusterIP: 172.10.1.9
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
```
|
62
CloudNative/Kubernetes/Base/k8s优先级与抢占调度.md
Normal file
|
@ -0,0 +1,62 @@
|
|||
1、先到先得,简单,公平
|
||||
2、优先级策略(Priority),符合公司业务特点
|
||||
|
||||
# PodPriority
|
||||
优先级调度配置
|
||||
1、创建PriorityClass
|
||||
2、为各个pod配置上不同的priorityClassName
|
||||
```
|
||||
apiVersion: scheduling.k8s.io/v1
|
||||
kind: PriorityClass
|
||||
metadata:
|
||||
name: high
|
||||
value: 1000
|
||||
globalDefault: false
|
||||
```
|
||||
```
|
||||
apiVersion: scheduling.k8s.io/v1
|
||||
kind: PriorityClass
|
||||
metadata:
|
||||
name: low
|
||||
value: 10
|
||||
globalDefault: false
|
||||
```
|
||||
为pod1配置上high,为pod2配置上low
|
||||
```
|
||||
...
|
||||
spec:
|
||||
priorityClassName: high # 或者low
|
||||
...
|
||||
```
|
||||
|
||||
# 一些内置的优先级设置
|
||||
### 内置的默认优先级
|
||||
```
|
||||
DefaultPriorityWhenNoDefaultClassExists=0
|
||||
如果未设置优先级,那么默认优先级都为0
|
||||
```
|
||||
|
||||
### 用户可配置的最大优先级限制
|
||||
```
|
||||
HighestUserDefinablePriority=10亿
|
||||
```
|
||||
### 系统级别优先级
|
||||
```
|
||||
SystemCriticalPriority=20亿
|
||||
```
|
||||
|
||||
### 内置系统级别优先级
|
||||
```
|
||||
system-cluster-critical
|
||||
system-node-critical
|
||||
```
|
||||
|
||||
#
|
||||
抢占调度
|
||||
1、优先选择打破pdb最少的节点(Pod Disruption Budget,Pod中断预算)
|
||||
2、选择待抢占的pods中最大优先级最小的节点
|
||||
3、选择待抢占的pod优先级加和最小的节点
|
||||
4、接下来选择待抢占pod数量最小的节点
|
||||
5、最后选择拥有最晚启动pod的节点
|
||||
|
||||
|
98
CloudNative/Kubernetes/Base/k8s各组件版本支持策略.md
Normal file
|
@ -0,0 +1,98 @@
|
|||
## 版本支持策略
|
||||
Kubernetes 版本号格式为 x.y.z,其中 x 为大版本号,y 为小版本号,z 为补丁版本号。 版本号格式遵循 Semantic Versioning 规则。 更多信息,请参阅 Kubernetes 发布版本。
|
||||
|
||||
## 版本偏差策略
|
||||
### kube-apiserver
|
||||
在 高可用(HA)集群 中, 多个 kube-apiserver 实例小版本号最多差1。
|
||||
|
||||
例如:
|
||||
|
||||
最新的 kube-apiserver 版本号如果是 1.19
|
||||
则受支持的 kube-apiserver 版本号包括 1.19 和 1.18
|
||||
|
||||
### kubelet
|
||||
kubelet 版本号不能高于 kube-apiserver,最多可以比 kube-apiserver 低两个小版本。
|
||||
|
||||
例如:
|
||||
|
||||
kube-apiserver 版本号如果是 1.19
|
||||
受支持的的 kubelet 版本将包括 1.19、1.18 和 1.17
|
||||
说明: 如果 HA 集群中多个 kube-apiserver 实例版本号不一致,相应的 kubelet 版本号可选范围也要减小。
|
||||
例如:
|
||||
|
||||
如果 kube-apiserver 实例同时存在 1.19 和 1.18
|
||||
kubelet 的受支持版本将是 1.18 和 1.17 (1.19 不再支持,因为它比 1.18 版本的 kube-apiserver 更新)
|
||||
|
||||
### kube-controller-manager、 kube-scheduler 和 cloud-controller-manager
|
||||
kube-controller-manager、kube-scheduler 和 cloud-controller-manager 版本不能高于 kube-apiserver 版本号。 最好它们的版本号与 kube-apiserver 保持一致,但允许比 kube-apiserver 低一个小版本(为了支持在线升级)。
|
||||
|
||||
例如:
|
||||
|
||||
如果 kube-apiserver 版本号为 1.19
|
||||
kube-controller-manager、kube-scheduler 和 cloud-controller-manager 版本支持 1.19 和 1.18
|
||||
说明: 如果在 HA 集群中,多个 kube-apiserver 实例版本号不一致,他们也可以跟任意一个 kube-apiserver 实例通信(例如,通过 load balancer), 但 kube-controller-manager、kube-scheduler 和 cloud-controller-manager 版本可用范围会相应的减小。
|
||||
例如:
|
||||
|
||||
kube-apiserver 实例同时存在 1.19 和 1.18 版本
|
||||
kube-controller-manager、kube-scheduler 和 cloud-controller-manager 可以通过 load balancer 与所有的 kube-apiserver 通信
|
||||
kube-controller-manager、kube-scheduler 和 cloud-controller-manager 可选版本为 1.18 (1.19 不再支持,因为它比 1.18 版本的 kube-apiserver 更新)
|
||||
|
||||
### kubectl
|
||||
kubectl 可以比 kube-apiserver 高一个小版本,也可以低一个小版本。
|
||||
|
||||
例如:
|
||||
|
||||
如果 kube-apiserver 当前是 1.19 版本
|
||||
kubectl 则支持 1.20、1.19 和 1.18
|
||||
说明: 如果 HA 集群中的多个 kube-apiserver 实例版本号不一致,相应的 kubectl 可用版本范围也会减小。
|
||||
例如:
|
||||
|
||||
kube-apiserver 多个实例同时存在 1.19 和 1.18
|
||||
kubectl 可选的版本为 1.19 和 1.18(其他版本不再支持,因为它会比其中某个 kube-apiserver 实例高或低一个小版本)
|
||||
|
||||
## 支持的组件升级次序
|
||||
组件之间支持的版本偏差会影响组件升级的顺序。 本节描述组件从版本 1.18 到 1.19 的升级次序。
|
||||
|
||||
### kube-apiserver
|
||||
前提条件:
|
||||
|
||||
单实例集群中,kube-apiserver 实例版本号须是 1.18
|
||||
高可用(HA)集群中,所有的 kube-apiserver 实例版本号必须是 1.18 或 1.19(确保满足最新和最旧的实例小版本号相差不大于1)
|
||||
kube-controller-manager、kube-scheduler 和 cloud-controller-manager 版本号必须为 1.18(确保不高于 API server 的版本,且版本号相差不大于1)
|
||||
kubelet 实例版本号必须是 1.18 或 1.17(确保版本号不高于 API server,且版本号相差不大于2)
|
||||
注册的 admission 插件必须能够处理新的 kube-apiserver 实例发送过来的数据:
|
||||
ValidatingWebhookConfiguration 和 MutatingWebhookConfiguration 对象必须升级到可以处理 1.19 版本新加的 REST 资源(或使用 1.15 版本提供的 matchPolicy: Equivalent 选项)
|
||||
插件可以处理任何 1.19 版本新的 REST 资源数据和新加的字段
|
||||
升级 kube-apiserver 到 1.19
|
||||
|
||||
说明:
|
||||
根据 API 弃用策略 和 API 变更指南, kube-apiserver 不能跨小版本号升级,即使是单实例集群也不可以。
|
||||
|
||||
### kube-controller-manager、kube-scheduler 和 cloud-controller-manager
|
||||
前提条件:
|
||||
|
||||
kube-apiserver 实例必须为 1.19 (HA 集群中,所有的kube-apiserver 实例必须在组件升级前完成升级)
|
||||
升级 kube-controller-manager、kube-scheduler 和 cloud-controller-manager 到 1.19
|
||||
|
||||
### kubelet
|
||||
前提条件:
|
||||
|
||||
kube-apiserver 实例必须为 1.19 版本
|
||||
kubelet 可以升级到 1.19(或者停留在 1.18 或 1.17)
|
||||
|
||||
警告:
|
||||
集群中 kubelet 版本号不建议比 kube-apiserver 低两个版本号:
|
||||
|
||||
它们必须升级到与 kube-apiserver 相差不超过 1 个小版本,才可以升级其他控制面组件
|
||||
否则可能运行在 3 个仍受维护的小版本范围之外
|
||||
|
||||
### kube-proxy
|
||||
kube-proxy 必须与节点上的 kubelet 的小版本相同
|
||||
kube-proxy 一定不能比 kube-apiserver 小版本更新
|
||||
kube-proxy 最多只能比 kube-apiserver 早两个小版本
|
||||
例如:
|
||||
|
||||
如果 kube-proxy 的版本是 1.17:
|
||||
|
||||
kubelet 版本必须相同,也是 1.17
|
||||
kube-apiserver 版本必须在 1.17 到 1.19 之间(闭区间)
|
160
CloudNative/Kubernetes/Base/k8s基础.md
Normal file
|
@ -0,0 +1,160 @@
|
|||
### 端口:
|
||||
master
|
||||
apiserver:6443(https), 8080
|
||||
etcd:2379(外部访问。client访问),2380(etcd集群互访)
|
||||
kubelet: 10250
|
||||
kube-scheduler(healthy):10251
|
||||
controller-manager(healthy):10252
|
||||
cloud-manager(healthy):10253
|
||||
kubelet api: 10255
|
||||
kube-proxy(healthy):10256
|
||||
|
||||
|
||||
|
||||
|
||||
### k8s架构
|
||||
master:api server,scheduler,controller manager
|
||||
node:kubelet,kube-proxy
|
||||
|
||||
### k8s最小调度单元:pod
|
||||
pod有标签(label),标签选择器(label selector)
|
||||
|
||||
pod分为自主式与控制器管理的pod
|
||||
|
||||
## 控制器类型:
|
||||
ReplicationController
|
||||
ReplicaSet
|
||||
Deployment
|
||||
StatefulSet
|
||||
DaemonSet
|
||||
Job
|
||||
CronJob
|
||||
HPA(HorizontalPodAutoscaler,自动扩展)
|
||||
|
||||
## CNI:
|
||||
flannel:网络配置
|
||||
calico:网络配置,网络策略
|
||||
...
|
||||
|
||||
### 资源对象:
|
||||
负载类型资源:pod,ReplicaSet,Deployment,service,Job,CronJob
|
||||
|
||||
### pod生命周期:
|
||||
初始化容器:init container
|
||||
启动后:post start
|
||||
存活状态检测:liveness probe
|
||||
就绪检测:readiness probe
|
||||
停止前: pre stop
|
||||
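下面用一个示意 pod 把上面这些生命周期相关的字段串起来(镜像、路径、时间等均为假定的演示值):
```
apiVersion: v1
kind: Pod
metadata:
  name: probe-demo
spec:
  initContainers:                 # 初始化容器
  - name: init
    image: busybox
    command: ["sh", "-c", "echo init done"]
  containers:
  - name: web
    image: nginx
    ports:
    - name: http
      containerPort: 80
    lifecycle:
      postStart:                  # 启动后
        exec:
          command: ["sh", "-c", "echo started"]
      preStop:                    # 停止前
        exec:
          command: ["sh", "-c", "nginx -s quit"]
    livenessProbe:                # 存活状态检测
      httpGet:
        port: http
        path: /index.html
      initialDelaySeconds: 5
      periodSeconds: 10
    readinessProbe:               # 就绪检测
      httpGet:
        port: http
        path: /index.html
      initialDelaySeconds: 5
      periodSeconds: 10
```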
---
|
||||
显示集群的运行信息:kubectl cluster-info
|
||||
|
||||
显示集群各组件的运行信息:kubectl get cs
|
||||
|
||||
显示apiserver各版本:kubectl api-versions
|
||||
|
||||
查看ReplicaSet的apiVersion:kubectl explain rs
|
||||
|
||||
获取pod简单信息:kubectl get pod
|
||||
|
||||
获取pod信息并显示label:kubectl get pod --show-labels
|
||||
|
||||
获取pod详细信息:kubectl get pod -o wide
|
||||
|
||||
获取pod信息并显示为yaml格式:kubectl get pod -o yaml
|
||||
|
||||
持续查看pod信息:kubectl get pod -w (watch)
|
||||
|
||||
显示标签中含有xxx标签的pod:kubectl get pod -l xxx
|
||||
|
||||
为pod打标签:kubectl label pod pod名称 标签key=标签value --overwrite
|
||||
|
||||
查看pod的描述信息:kubectl describe xxx
|
||||
|
||||
扩缩容器数量:kubectl scale --replicas=3 xxx
|
||||
|
||||
回滚到上一个版本:kubectl rollout undo deployment deployment名称
|
||||
|
||||
修改配置信息:kubectl edit
|
||||
|
||||
查看资源字段定义及使用:kubectl explain 资源字段
|
||||
|
||||
从dockerhub中拉取最新的nginx镜像,运行端口80,一个副本:kubectl run nginx-deploy --image=nginx --port=80 --replicas=1
|
||||
|
||||
查看pod日志:kubectl logs pod名称 -c 容器名称
|
||||
|
||||
进入pod中的容器里:kubectl exec -it pod名称 -c 容器名称 -- /bin/sh
|
||||
|
||||
删除资源:kubectl delete
|
||||
删除指定配置资源清单:kubectl delete -f xxx.yaml
|
||||
删除指定资源:kubectl delete 资源名称 -n namespace
|
||||
|
||||
禁止pod调度到该node:kubectl cordon <node>
|
||||
|
||||
驱逐节点上的所有pod:kubectl drain <node>
|
||||
|
||||
节点重新添加到集群:kubectl uncordon <node>
|
||||
|
||||
为节点设置污点:kubectl taint nodes node1 key1=value1:NoSchedule
|
||||
|
||||
---
|
||||
|
||||
大部分资源的配置清单
|
||||
---
|
||||
apiVersion:group/version # kubectl explain 资源类型
|
||||
|
||||
kind: 资源类别
|
||||
|
||||
metadata:元数据
|
||||
name:资源名称
|
||||
namespace:资源命名空间
|
||||
labels:标签
|
||||
annotations:
|
||||
spec:规格,期望的状态
|
||||
containers:
|
||||
livenessProbe:存活探针
|
||||
httpGet:
|
||||
port:端口或容器中ports定义的端口对应的name
|
||||
path:/index.html
|
||||
exec:
|
||||
command:[命令或脚本]
|
||||
initialDelaySeconds:数字,延迟时间,秒
|
||||
periodSeconds:数字,探测间隔,秒
|
||||
readinessProbe:就绪探针
|
||||
httpGet:
|
||||
port:端口或容器中ports定义的端口对应的name
|
||||
path:/index.html
|
||||
exec:
|
||||
command:[命令或脚本]
|
||||
initialDelaySeconds:数字,延迟时间,秒
|
||||
periodSeconds:数字,探测间隔,秒
|
||||
- name:名称
|
||||
image:镜像地址
|
||||
args:[entrypoint]
|
||||
lifecycle:存活钩子
|
||||
postStart:
|
||||
exec:
|
||||
command
|
||||
- name:名称
|
||||
image:镜像地址
|
||||
imagePullPolicy:Always[总是去拉取镜像],Never[从来不去拉取],IfNotPresent[本地不存在,拉取]
|
||||
ports:端口列表
|
||||
- name:端口名称
|
||||
containerPort:端口
|
||||
- name:端口名称
|
||||
containerPort:端口
|
||||
protocol:协议,默认tcp
|
||||
command:[命令]
|
||||
|
||||
restartPolicy:Always[总是重启],OnFailure[失败重启],Never[不重启],默认是Always
|
||||
|
||||
nodeSelector:{节点标签选择器}
|
||||
nodeName:节点名称
|
||||
annotations:与label不同地方在于,它不能用于挑选资源对象,仅用于为对象提供“元数据”
|
||||
|
||||
|
||||
status:状态只读字段,不需要定义
|
||||
|
||||
---
|
||||
|
||||
|
||||
|
27
CloudNative/Kubernetes/Base/k8s权限控制
Normal file
|
@ -0,0 +1,27 @@
|
|||
RBAC基于角色的访问控制--全拼Role-Based Access Control
|
||||
|
||||
Service Account为服务提供了一种方便的认证机制,但它不关心授权的问题。可以配合RBAC来为Service Account鉴权
|
||||
|
||||
在Kubernetes中,授权有ABAC(基于属性的访问控制)、RBAC(基于角色的访问控制)、Webhook、Node、AlwaysDeny(一直拒绝)和AlwaysAllow(一直允许)这6种模式。
|
||||
|
||||
在RBAC API中,通过如下的步骤进行授权:
|
||||
1)定义角色:在定义角色时会指定此角色对于资源的访问控制的规则;
|
||||
2)绑定角色:将主体与角色进行绑定,对用户进行访问授权。
|
||||
Role与ClusterRole
|
||||
一个角色包含了一套表示一组权限的规则。 权限以纯粹的累加形式累积(没有"否定"的规则)。
|
||||
Role:角色可以由命名空间内的Role对象定义,一个Role对象只能用于授予对某一单一命名空间中资源的访问权限
|
||||
ClusterRole:整个Kubernetes集群范围内有效的角色则通过ClusterRole对象实现。
|
||||
简介
|
||||
role:
|
||||
1、允许的操作,如get,list等
|
||||
|
||||
2、允许操作的对象,如pod,svc等
|
||||
|
||||
rolebinding:
|
||||
|
||||
将哪个用户绑定到哪个role或clusterrole上
|
||||
|
||||
clusterrole:(集群角色)
|
||||
clusterrolebinding:(绑定到集群)
|
||||
3、如果使用rolebinding绑定到clusterrole上,表示绑定的用户只能用于当前namespace的权限
|
||||
创建k8s账号与RBAC授权使用
|
1
CloudNative/Kubernetes/Base/k8s查看serviceAccount的相关配置
Normal file
|
@ -0,0 +1 @@
|
|||
kubectl describe secret [serviceaccount] -n kube-system
|
445
CloudNative/Kubernetes/Base/k8s证书浅析
Normal file
|
@ -0,0 +1,445 @@
|
|||
在进行二进制搭建K8S集群前,我们需要梳理最磨人的一个点,就是各种各样的证书。
|
||||
|
||||
官方文档参考:https://kubernetes.io/docs/setup/certificates/
|
||||
|
||||
一共有多少证书:
|
||||
|
||||
先从Etcd算起:
|
||||
|
||||
1、Etcd对外提供服务,要有一套etcd server证书
|
||||
|
||||
2、Etcd各节点之间进行通信,要有一套etcd peer证书
|
||||
|
||||
3、Kube-APIserver访问Etcd,要有一套etcd client证书
|
||||
|
||||
再算kubernetes:
|
||||
|
||||
4、Kube-APIserver对外提供服务,要有一套kube-apiserver server证书
|
||||
|
||||
5、kube-scheduler、kube-controller-manager、kube-proxy、kubelet和其他可能用到的组件,需要访问kube-APIserver,要有一套kube-APIserver client证书
|
||||
|
||||
6、kube-controller-manager要生成服务的service account,要有一对用来签署service account的证书(CA证书)
|
||||
|
||||
7、kubelet对外提供服务,要有一套kubelet server证书
|
||||
|
||||
8、kube-APIserver需要访问kubelet,要有一套kubelet client证书
|
||||
|
||||
加起来共8套,但是这里的“套”的含义我们需要理解。
|
||||
|
||||
同一个套内的证书必须是用同一个CA签署的,签署不同套里的证书的CA可以相同,也可以不同。例如,所有etcd server证书需要是同一个CA签署的,所有的etcd peer证书也需要是同一个CA签署的,而一个etcd server证书和一个etcd peer证书,完全可以是两个CA机构签署的,彼此没有任何关系。这算两套证书。
|
||||
|
||||
为什么同一个“套”内的证书必须是同一个CA签署的
|
||||
|
||||
原因在验证这些证书的一端。因为在要验证这些证书的一端,通常只能指定一个Root CA。这样一来,被验证的证书自然都需要是被这同一个Root CA对应的私钥签署,不然不能通过认证。
|
||||
|
||||
其实实际上,使用一套证书(都使用一套CA来签署)一样可以搭建出K8S,一样可以上生产,但是理清这些证书的关系,在遇到因为证书错误,请求被拒绝的现象的时候,不至于无从下手,而且如果没有搞清证书之间的关系,在维护或者解决问题的时候,贸然更换了证书,弄不好会把整个系统搞瘫。
|
||||
|
||||
TLS bootstrapping 简化kubelet证书制作
|
||||
|
||||
Kubernetes1.4版本引入了一组签署证书用的API。这组API的引入,使我们可以不用提前准备kubelet用到的证书。
|
||||
|
||||
官网地址:https://kubernetes.io/docs/tasks/tls/certificate-rotation/
|
||||
|
||||
每个kubelet用到的证书都是独一无二的,因为它要绑定各自的IP地址,于是需要给每个kubelet单独制作证书,如果业务量很大的情况下,node节点会很多,这样一来kubelet的数量也随之增加,而且还会经常变动(增减Node)kubelet的证书制作就成为一件很麻烦的事情。使用TLS bootstrapping就可以省事儿很多。
|
||||
|
||||
工作原理:Kubelet第一次启动的时候,先用同一个bootstrap token作为凭证。这个token已经被提前设置为隶属于用户组system:bootstrappers,并且这个用户组的权限也被限定为只能用来申请证书。 用这个bootstrap token通过认证后,kubelet申请到属于自己的两套证书(kubelet server、kube-apiserver client for kubelet),申请成功后,再用属于自己的证书做认证,从而拥有了kubelet应有的权限。这样一来,就去掉了手动为每个kubelet准备证书的过程,并且kubelet的证书还可以自动轮替更新
|
||||
|
||||
参考文档:
|
||||
|
||||
https://mritd.me/2018/01/07/kubernetes-tls-bootstrapping-note/
|
||||
|
||||
https://www.jianshu.com/p/bb973ab1029b
|
||||
|
||||
kubelet证书为何不同
|
||||
|
||||
这样做是一个为了审计,另一个为了安全。 每个kubelet既是服务端(kube-apiserver需要访问kubelet),也是客户端(kubelet需要访问kube-apiserver),所以要有服务端和客户端两组证书。
|
||||
|
||||
服务端证书需要与服务器地址绑定,每个kubelet的地址都不相同,即使绑定域名也是绑定不同的域名,故服务端地址不同
|
||||
|
||||
客户端证书也不应相同,每个kubelet的认证证书与所在机器的IP绑定后,可以防止一个kubelet的认证证书泄露以后,使从另外的机器上伪造的请求通过验证。
|
||||
|
||||
安全方面,如果每个node上保留了用于签署证书的bootstrap token,那么bootstrap token泄漏以后,是不是可以随意签署证书了?安全隐患非常大。所以,kubelet启动成功以后,本地的bootstrap token需要被删除。
|
||||
|
||||
正式制作证书
|
||||
|
||||
虽然可以用多套证书,但是维护多套CA实在过于繁杂,这里还是用一个CA签署所有证书。
|
||||
|
||||
需要准备的证书:
|
||||
|
||||
|
||||
ca-key.pem
|
||||
ca.pem
|
||||
admin-key.pem
|
||||
admin.pem
|
||||
kube-scheduler-key.pem
|
||||
kube-scheduler.pem
|
||||
kube-controller-manager-key.pem
|
||||
kube-controller-manager.pem
|
||||
kube-proxy-key.pem
|
||||
|
||||
kube-proxy.pem
|
||||
|
||||
kubernetes-key.pem
|
||||
|
||||
kubernetes.pem
|
||||
|
||||
使用证书的组件如下:
|
||||
|
||||
etcd:使用 ca.pem kubernetes-key.pem kubernetes.pem
|
||||
|
||||
kube-apiserver:使用 ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem
|
||||
|
||||
kubelet:使用 ca.pem
|
||||
|
||||
kube-proxy:使用 ca.pem kube-proxy-key.pem kube-proxy.pem
|
||||
|
||||
kubectl:使用 ca.pem admin-key.pem、admin.pem
|
||||
|
||||
kube-controller-manager:使用 ca-key.pem ca.pem kube-controller-manager-key.pem kube-controller-manager.pem
|
||||
|
||||
kube-scheduler: 使用 kube-scheduler-key.pem kube-scheduler.pem
|
||||
我们使用CFSSL来制作证书,它是cloudflare开发的一个开源的PKI工具,是一个完备的CA服务系统,可以签署、撤销证书等,覆盖了一个证书的整个生命周期,后面只用到了它的命令行工具。
|
||||
|
||||
注:一般情况下,K8S中证书只需要创建一次,以后在向集群中添加新节点时只要将/etc/kubernetes/ssl目录下的证书拷贝到新节点上即可。
|
||||
|
||||
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
|
||||
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
|
||||
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
|
||||
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
|
||||
mv cfssl_linux-amd64 /usr/local/bin/cfssl
|
||||
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
|
||||
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
|
||||
|
||||
创建CA证书
|
||||
|
||||
创建证书配置文件
|
||||
vim ca-config.json
|
||||
{
|
||||
"signing": {
|
||||
"default": {
|
||||
"expiry": "87600h"
|
||||
},
|
||||
"profiles": {
|
||||
"kubernetes": {
|
||||
"usages": [
|
||||
"signing",
|
||||
"key encipherment",
|
||||
"server auth",
|
||||
"client auth"
|
||||
],
|
||||
"expiry": "87600h"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
字段说明:
|
||||
|
||||
ca-config.json:可以定义多个 profiles,分别指定不同的过期时间、使用场景等参数;后续在签名证书时使用某个 profile;
|
||||
|
||||
signing:表示该证书可以签名其他证书;生成的ca.pem证书中 CA=TRUE;
|
||||
|
||||
server auth:表示client可以用该 CA 对server提供的证书进行验证;
|
||||
|
||||
client auth:表示server可以用该CA对client提供的证书进行验证;
|
||||
|
||||
expiry:过期时间
|
||||
|
||||
创建CA证书签名请求文件
|
||||
|
||||
vim ca-csr.json
|
||||
|
||||
|
||||
{
|
||||
"CN": "kubernetes",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "CN",
|
||||
"ST": "BeiJing",
|
||||
"L": "BeiJing",
|
||||
"O": "k8s",
|
||||
"OU": "System"
|
||||
}
|
||||
],
|
||||
"ca": {
|
||||
"expiry": "87600h"
|
||||
}
|
||||
}
|
||||
字段说明:
|
||||
|
||||
“CN”:Common Name,kube-apiserver 从证书中提取该字段作为请求的用户名 (User Name);浏览器使用该字段验证网站是否合法;
|
||||
|
||||
“O”:Organization,kube-apiserver 从证书中提取该字段作为请求用户所属的组 (Group)
|
||||
|
||||
生成CA证书和私钥
|
||||
|
||||
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
|
||||
|
||||
ls | grep ca
|
||||
ca-config.json
|
||||
ca.csr
|
||||
ca-csr.json
|
||||
ca-key.pem
|
||||
ca.pem
|
||||
其中ca-key.pem是ca的私钥,ca.csr是一个签署请求,ca.pem是CA证书,是后面kubernetes组件会用到的RootCA。
|
||||
|
||||
创建kubernetes证书
|
||||
|
||||
在创建这个证书之前,先规划一下架构
|
||||
|
||||
k8s-master1 10.211.55.11
|
||||
|
||||
k8s-master2 10.211.55.12
|
||||
|
||||
k8s-master3 10.211.55.13
|
||||
|
||||
etcd01 10.211.55.11
|
||||
|
||||
etcd02 10.211.55.12
|
||||
|
||||
etcd03 10.211.55.13
|
||||
|
||||
VIP 10.211.55.8
|
||||
|
||||
创建kubernetes证书签名请求文件
|
||||
|
||||
|
||||
|
||||
vim kubernetes-csr.json
|
||||
{
|
||||
"CN": "kubernetes",
|
||||
"hosts": [
|
||||
"127.0.0.1",
|
||||
"10.211.55.11",
|
||||
"10.211.55.12",
|
||||
"10.211.55.13",
|
||||
"10.211.55.8",
|
||||
"10.0.0.1",
|
||||
"k8s-master1",
|
||||
"k8s-master2",
|
||||
"k8s-master3",
|
||||
"etcd01",
|
||||
"etcd02",
|
||||
|
||||
"etcd03",
|
||||
"kubernetes",
|
||||
"kube-api.wangdong.com",
|
||||
"kubernetes.default",
|
||||
"kubernetes.default.svc",
|
||||
"kubernetes.default.svc.cluster",
|
||||
"kubernetes.default.svc.cluster.local"
|
||||
],
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "CN",
|
||||
"ST": "BeiJing",
|
||||
"L": "BeiJing",
|
||||
"O": "k8s",
|
||||
"OU": "System"
|
||||
}
|
||||
]
|
||||
}
|
||||
字段说明:
|
||||
|
||||
如果 hosts 字段不为空则需要指定授权使用该证书的 IP 或域名列表。
|
||||
|
||||
由于该证书后续被 etcd 集群和 kubernetes master 集群使用,将etcd、master节点的IP都填上,同时还有service网络的首IP。(一般是 kube-apiserver 指定的 service-cluster-ip-range 网段的第一个IP,如 10.0.0.1)
|
||||
|
||||
三个etcd,三个master,以上物理节点的IP也可以更换为主机名。
|
||||
|
||||
生成kubernetes证书和私钥
|
||||
|
||||
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
|
||||
ls |grep kubernetes
|
||||
kubernetes.csr
|
||||
kubernetes-csr.json
|
||||
kubernetes-key.pem
|
||||
kubernetes.pem
|
||||
创建admin证书
|
||||
|
||||
创建admin证书签名请求文件
|
||||
|
||||
vim admin-csr.json
|
||||
{
|
||||
"CN": "admin",
|
||||
"hosts": [],
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "CN",
|
||||
"ST": "BeiJing",
|
||||
"L": "BeiJing",
|
||||
"O": "system:masters",
|
||||
"OU": "System"
|
||||
}
|
||||
]
|
||||
}
|
||||
说明:
|
||||
|
||||
后续 kube-apiserver 使用 RBAC 对客户端(如 kubelet、kube-proxy、Pod)请求进行授权;
|
||||
|
||||
kube-apiserver 预定义了一些 RBAC 使用的 RoleBindings,如 cluster-admin 将 Group system:masters 与 Role cluster-admin 绑定,该 Role 授予了调用kube-apiserver 的所有 API的权限;
|
||||
|
||||
O指定该证书的 Group 为 system:masters,kubelet 使用该证书访问 kube-apiserver 时 ,由于证书被 CA 签名,所以认证通过,同时由于证书用户组为经过预授权的 system:masters,所以被授予访问所有 API 的权限;
|
||||
|
||||
注:这个admin 证书,是将来生成管理员用的kube config 配置文件用的,现在我们一般建议使用RBAC 来对kubernetes 进行角色权限控制, kubernetes 将证书中的CN 字段 作为User, O 字段作为 Group
|
||||
|
||||
相关权限认证可以参考下面文章
|
||||
|
||||
https://mp.weixin.qq.com/s/XIkQdh5gnr-KJhuFHboNag
|
||||
|
||||
生成admin证书和私钥
|
||||
|
||||
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
|
||||
ls | grep admin
|
||||
admin.csr
|
||||
admin-csr.json
|
||||
admin-key.pem
|
||||
admin.pem
|
||||
创建kube-proxy证书
|
||||
|
||||
创建 kube-proxy 证书签名请求文件
|
||||
|
||||
vim kube-proxy-csr.json
|
||||
{
|
||||
"CN": "system:kube-proxy",
|
||||
"hosts": [],
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "CN",
|
||||
"ST": "BeiJing",
|
||||
"L": "BeiJing",
|
||||
"O": "k8s",
|
||||
"OU": "System"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
说明:
|
||||
|
||||
|
||||
|
||||
CN 指定该证书的 User 为 system:kube-proxy;
|
||||
|
||||
kube-apiserver 预定义的 RoleBinding system:node-proxier 将User system:kube-proxy 与 Role system:node-proxier 绑定,该 Role 授予了调用 kube-apiserver Proxy 相关 API 的权限;
|
||||
|
||||
该证书只会被 kube-proxy 当做 client 证书使用,所以 hosts 字段为空
|
||||
生成kube-proxy证书和私钥
|
||||
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
|
||||
ls |grep kube-proxy
|
||||
kube-proxy.csr
|
||||
kube-proxy-csr.json
|
||||
kube-proxy-key.pem
|
||||
kube-proxy.pem
|
||||
创建kube-controller-manager证书
|
||||
|
||||
创建 kube-controller-manager 证书签名请求文件
|
||||
|
||||
vim kube-controller-manager-csr.json
|
||||
|
||||
|
||||
{
|
||||
"CN": "system:kube-controller-manager",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"hosts": [
|
||||
"127.0.0.1",
|
||||
"10.211.55.11",
|
||||
"10.211.55.12",
|
||||
"10.211.55.13",
|
||||
"k8s-master1",
|
||||
"k8s-master2",
|
||||
"k8s-master3"
|
||||
],
|
||||
"names": [
|
||||
{
|
||||
"C": "CN",
|
||||
"ST": "BeiJing",
|
||||
"L": "BeiJing",
|
||||
"O": "system:kube-controller-manager",
|
||||
"OU": "system"
|
||||
}
|
||||
]
|
||||
}
|
||||
说明:
|
||||
|
||||
hosts 列表包含所有 kube-controller-manager 节点 IP;
|
||||
CN 为 system:kube-controller-manager、O 为 system:kube-controller-manager,kubernetes 内置的 ClusterRoleBindings system:kube-controller-manager 赋予 kube-controller-manager 工作所需的权限
|
||||
生成kube-controller-manager证书和私钥
|
||||
|
||||
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
|
||||
|
||||
创建kube-scheduler证书
|
||||
|
||||
创建 kube-scheduler 证书签名请求文件
|
||||
|
||||
vim kube-scheduler-csr.json
|
||||
{
|
||||
"CN": "system:kube-scheduler",
|
||||
"hosts": [
|
||||
"127.0.0.1",
|
||||
"10.211.55.11",
|
||||
"10.211.55.12",
|
||||
"10.211.55.13",
|
||||
"k8s-master1",
|
||||
"k8s-master2",
|
||||
"k8s-master3",
|
||||
],
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "CN",
|
||||
"ST": "BeiJing",
|
||||
"L": "BeiJing",
|
||||
"O": "system:kube-scheduler",
|
||||
"OU": "4Paradigm"
|
||||
}
|
||||
]
|
||||
}
|
||||
说明:
|
||||
|
||||
hosts 列表包含所有 kube-scheduler 节点 IP;
|
||||
CN 为 system:kube-scheduler、O 为 system:kube-scheduler,kubernetes 内置的 ClusterRoleBindings system:kube-scheduler 将赋予 kube-scheduler 工作所需的权限。
|
||||
|
||||
经过上述操作,我们会用到如下文件
|
||||
|
||||
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json| cfssljson -bare kube-scheduler
|
||||
ls | grep pem
|
||||
admin-key.pem
|
||||
admin.pem
|
||||
ca-key.pem
|
||||
ca.pem
|
||||
kube-proxy-key.pem
|
||||
kube-proxy.pem
|
||||
kubernetes-key.pem
|
||||
kubernetes.pem
|
||||
kube-controller-manager-key.pem
|
||||
kube-controller-manager.pem
|
||||
kube-scheduler-key.pem
|
||||
kube-scheduler.pem
|
||||
查看证书信息:
|
||||
|
||||
cfssl-certinfo -cert kubernetes.pem
|
||||
|
||||
在搭建k8s集群的时候,将这些文件分发到至此集群中其他节点机器中即可。至此,TLS证书创建完毕
|
||||
|
||||
证书这块知道怎么生成、怎么用即可,建议暂时不必过多研究
|
15
CloudNative/Kubernetes/Base/k8s调度过程与访问过程
Normal file
|
@ -0,0 +1,15 @@
|
|||
# 调度
|
||||
1、用户提交一个pod创建清单或变更清单(通过api)
|
||||
2、api会把数据写入etcd,scheduler通过api监听到etcd上的数据变化
|
||||
3、scheduler首先选择满足条件的node(各种亲和度,污点,资源等)
|
||||
4、scheduler在这些满足条件的node中,给各个node打分,最好的node会用于pod的创建运行(绑定node)
|
||||
5、kubelet通过api监听etcd,然后根据etcd中pod与node的绑定结果,在相应的node上创建pod,并维护其生命周期
|
||||
|
||||
# 访问
|
||||
外部通过ingress或者service访问集群内部服务
|
||||
原理:service使用selector选择合适的pod标签,把内部服务暴露出去。创建service时自动生成一个同名的ep(endpoints)。ep上记录pod与ip对应关系。
|
||||
通过kube-proxy实现服务发现与负载均衡(iptables/ipvs)[headless service不使用]
|
||||
dns为每个service创建A记录(service名称与服务ip[clusterIP等])(headless service直接为在dns中配置service名称与各pod ip的记录)
|
||||
1、首先访问到与ingress匹配规则匹配到的service
|
||||
2、service通过dns查询到的service ip(或pod ip,headless service)
|
||||
3、通过负载均衡算法,访问到一个后端pod,该pod与之交互
|
55
CloudNative/Kubernetes/Base/k8s资源限制.md
Normal file
|
@ -0,0 +1,55 @@
|
|||
requests: 保底的资源要求
|
||||
limits:资源上限
|
||||
包括以下资源
|
||||
cpu
|
||||
memory
|
||||
Ephemeral storage 【Ephemeral,短暂的】
|
||||
extended-resource:如nvidia.com/gpu
|
||||
|
||||
|
||||
pod Qos 服务质量
|
||||
Guaranteed 高,保障,request==limit
|
||||
Burstable,中,弹性 cpu/memory request与limit不相等
|
||||
bestEffort,低,尽力而为。所有资源的request与limit都不填
|
||||
|
||||
k8s是使用隐性 Qos
|
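例如,下面这个容器的 request 与 limit 完全相等,对应的 pod QoS 即为 Guaranteed(数值仅为演示用的假定值,片段写在 pod 的 spec 下):
```
spec:
  containers:
  - name: app
    image: nginx
    resources:
      requests:
        cpu: "500m"       # 保底的资源要求,调度器按request调度
        memory: 256Mi
      limits:
        cpu: "500m"       # 资源上限,与request相等则为Guaranteed
        memory: 256Mi
```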
||||
|
||||
不同的Qos调度不同
|
||||
1、调度器会使用request进行调度
|
||||
cpu按照该request划分权重
|
||||
--cpu-manager-policy=static,Guaranteed整数会绑核
|
||||
|
||||
memory按Qos的OOMScore(得分越高,在node内存溢出以后,会优先剔除)
|
||||
Guaranteed -998
|
||||
Burstable 2-999
|
||||
bestEffort 1000
|
||||
|
||||
Eviction(驱逐)
|
||||
优先驱逐bestEffort
|
||||
Kubelet - CPUManager
|
||||
|
||||
|
||||
ResoureQuota【quota限制】
|
||||
限制每个namespace 的资源用量
|
||||
限制demo-ns namespace下非BestEffort Qos的Quota
|
||||
cpu只能使用1000个
|
||||
memory只能使用200Gi
|
||||
pod只能创建10个
|
||||
当Quota使用超过后,禁止创建
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: ResourceQuota
|
||||
metadata:
|
||||
name: demo-quota
|
||||
namespace: demo-ns
|
||||
|
||||
spec:
|
||||
hard:
|
||||
cpu: "1000"
|
||||
memory: 200Gi
|
||||
pods: "10"
|
||||
scopeSelector: # 可以不填
|
||||
matchExpressions:
|
||||
- operator: Exists
|
||||
scopeName: NotBestEffort
|
1
CloudNative/Kubernetes/Base/k8s集群dns的记录方式
Normal file
|
@ -0,0 +1 @@
|
|||
服务名.namespace.svc.cluster.local. ip
|
18
CloudNative/Kubernetes/Base/kubeadm证书相关.md
Normal file
|
@ -0,0 +1,18 @@
|
|||
## 不能检查外部的ca签名的证书
|
||||
```
|
||||
kubeadm alpha certs check-expiration
|
||||
```
|
||||
|
||||
## 自动更新证书
|
||||
kubeadm会在控制平面升级时,自动更新所有证书
|
||||
|
||||
## 手动更新你的证书
|
||||
```
|
||||
kubeadm alpha certs renew
|
||||
```
|
||||
此命令用 CA (或者 front-proxy-CA )证书和存储在 /etc/kubernetes/pki 中的密钥执行更新。
|
||||
|
||||
kubeadm alpha certs renew 提供下列选项
|
||||
|
||||
--csr-only 可用于经过一个外部 CA 生成的证书签名请求来更新证书(无需实际替换更新证书)
|
||||
可以更新单个证书而不是全部证书
|
242
CloudNative/Kubernetes/Base/kubectl备忘录.md
Normal file
|
@ -0,0 +1,242 @@
|
|||
### Kubectl 上下文和配置
|
||||
```
|
||||
kubectl config view # 显示合并的 kubeconfig 配置。
|
||||
|
||||
# 同时使用多个 kubeconfig 文件并查看合并的配置
|
||||
KUBECONFIG=~/.kube/config:~/.kube/kubconfig2 kubectl config view
|
||||
|
||||
# 获取 e2e 用户的密码
|
||||
kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}'
|
||||
|
||||
kubectl config view -o jsonpath='{.users[].name}' # 显示第一个用户
|
||||
kubectl config view -o jsonpath='{.users[*].name}' # 获取用户列表
|
||||
kubectl config get-contexts # 显示上下文列表
|
||||
kubectl config current-context # 展示当前所处的上下文
|
||||
kubectl config use-context my-cluster-name # 设置默认的上下文为 my-cluster-name
|
||||
|
||||
# 添加新的集群配置到 kubeconf 中,使用 basic auth 进行身份认证
|
||||
kubectl config set-credentials kubeuser/foo.kubernetes.com --username=kubeuser --password=kubepassword
|
||||
|
||||
# 在指定上下文中持久性地保存名字空间,供所有后续 kubectl 命令使用
|
||||
kubectl config set-context --current --namespace=ggckad-s2
|
||||
|
||||
# 使用特定的用户名和名字空间设置上下文
|
||||
kubectl config set-context gce --user=cluster-admin --namespace=foo \
|
||||
&& kubectl config use-context gce
|
||||
|
||||
kubectl config unset users.foo # 删除用户 foo
|
||||
```
|
||||
|
||||
### 创建对象
|
||||
```
|
||||
kubectl apply -f ./my-manifest.yaml # 创建资源
|
||||
kubectl apply -f ./my1.yaml -f ./my2.yaml # 使用多个文件创建
|
||||
kubectl apply -f ./dir # 基于目录下的所有清单文件创建资源
|
||||
kubectl apply -f https://git.io/vPieo # 从 URL 中创建资源
|
||||
kubectl create deployment nginx --image=nginx # 启动单实例 nginx
|
||||
kubectl explain pods,svc # 获取 pod 清单的文档说明
|
||||
```
|
||||
|
||||
### 查看和查找资源
|
||||
```
|
||||
# get 命令的基本输出
|
||||
kubectl get services # 列出当前命名空间下的所有 services
|
||||
kubectl get pods --all-namespaces # 列出所有命名空间下的全部的 Pods
|
||||
kubectl get pods -o wide # 列出当前命名空间下的全部 Pods,并显示更详细的信息
|
||||
kubectl get deployment my-dep # 列出某个特定的 Deployment
|
||||
kubectl get pods # 列出当前命名空间下的全部 Pods
|
||||
kubectl get pod my-pod -o yaml # 获取一个 pod 的 YAML
|
||||
|
||||
# describe 命令的详细输出
|
||||
kubectl describe nodes my-node
|
||||
kubectl describe pods my-pod
|
||||
|
||||
# 列出当前名字空间下所有 Services,按名称排序
|
||||
kubectl get services --sort-by=.metadata.name
|
||||
|
||||
# 列出 Pods,按重启次数排序
|
||||
kubectl get pods --sort-by='.status.containerStatuses[0].restartCount'
|
||||
|
||||
# 列举所有 PV 持久卷,按容量排序
|
||||
kubectl get pv --sort-by=.spec.capacity.storage
|
||||
|
||||
# 获取包含 app=cassandra 标签的所有 Pods 的 version 标签
|
||||
kubectl get pods --selector=app=cassandra -o \
|
||||
jsonpath='{.items[*].metadata.labels.version}'
|
||||
|
||||
# 获取所有工作节点(使用选择器以排除标签名称为 'node-role.kubernetes.io/master' 的结果)
|
||||
kubectl get node --selector='!node-role.kubernetes.io/master'
|
||||
|
||||
# 获取当前命名空间中正在运行的 Pods
|
||||
kubectl get pods --field-selector=status.phase=Running
|
||||
|
||||
# 获取全部节点的 ExternalIP 地址
|
||||
kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="ExternalIP")].address}'
|
||||
|
||||
# 列出属于某个特定 RC 的 Pods 的名称
|
||||
# 在转换对于 jsonpath 过于复杂的场合,"jq" 命令很有用;可以在 https://stedolan.github.io/jq/ 找到它。
|
||||
sel=${$(kubectl get rc my-rc --output=json | jq -j '.spec.selector | to_entries | .[] | "\(.key)=\(.value),"')%?}
|
||||
echo $(kubectl get pods --selector=$sel --output=jsonpath={.items..metadata.name})
|
||||
|
||||
# 显示所有 Pods 的标签(或任何其他支持标签的 Kubernetes 对象)
|
||||
kubectl get pods --show-labels
|
||||
|
||||
# 检查哪些节点处于就绪状态
|
||||
JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' \
|
||||
&& kubectl get nodes -o jsonpath="$JSONPATH" | grep "Ready=True"
|
||||
|
||||
# 列出被一个 Pod 使用的全部 Secret
|
||||
kubectl get pods -o json | jq '.items[].spec.containers[].env[]?.valueFrom.secretKeyRef.name' | grep -v null | sort | uniq
|
||||
|
||||
# 列举所有 Pods 中初始化容器的容器 ID(containerID)
|
||||
# Helpful when cleaning up stopped containers, while avoiding removal of initContainers.
|
||||
kubectl get pods --all-namespaces -o jsonpath='{range .items[*].status.initContainerStatuses[*]}{.containerID}{"\n"}{end}' | cut -d/ -f3
|
||||
|
||||
# 列出事件(Events),按时间戳排序
|
||||
kubectl get events --sort-by=.metadata.creationTimestamp
|
||||
|
||||
# 比较当前的集群状态和假定某清单被应用之后的集群状态
|
||||
kubectl diff -f ./my-manifest.yaml
|
||||
```
|
||||
|
||||
### 更新资源
|
||||
```
|
||||
kubectl set image deployment/frontend www=image:v2 # 滚动更新 "frontend" Deployment 的 "www" 容器镜像
|
||||
kubectl rollout history deployment/frontend # 检查 Deployment 的历史记录,包括版本
|
||||
kubectl rollout undo deployment/frontend # 回滚到上次部署版本
|
||||
kubectl rollout undo deployment/frontend --to-revision=2 # 回滚到特定部署版本
|
||||
kubectl rollout status -w deployment/frontend # 监视 "frontend" Deployment 的滚动升级状态直到完成
|
||||
kubectl rollout restart deployment/frontend # 轮替重启 "frontend" Deployment
|
||||
|
||||
cat pod.json | kubectl replace -f - # 通过传入到标准输入的 JSON 来替换 Pod
|
||||
|
||||
# 强制替换,删除后重建资源。会导致服务不可用。
|
||||
kubectl replace --force -f ./pod.json
|
||||
|
||||
# 为多副本的 nginx 创建服务,使用 80 端口提供服务,连接到容器的 8000 端口。
|
||||
kubectl expose rc nginx --port=80 --target-port=8000
|
||||
|
||||
# 将某单容器 Pod 的镜像版本(标签)更新到 v4
|
||||
kubectl get pod mypod -o yaml | sed 's/\(image: myimage\):.*$/\1:v4/' | kubectl replace -f -
|
||||
|
||||
kubectl label pods my-pod new-label=awesome # 添加标签
|
||||
kubectl annotate pods my-pod icon-url=http://goo.gl/XXBTWq # 添加注解
|
||||
kubectl autoscale deployment foo --min=2 --max=10 # 对 "foo" Deployment 自动伸缩容
|
||||
```
|
||||
|
||||
### 部分更新资源
|
||||
```
|
||||
# 部分更新某节点
|
||||
kubectl patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}'
|
||||
|
||||
# 更新容器的镜像;spec.containers[*].name 是必须的。因为它是一个合并性质的主键。
|
||||
kubectl patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}'
|
||||
|
||||
# 使用带位置数组的 JSON patch 更新容器的镜像
|
||||
kubectl patch pod valid-pod --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]'
|
||||
|
||||
# 使用带位置数组的 JSON patch 禁用某 Deployment 的 livenessProbe
|
||||
kubectl patch deployment valid-deployment --type json -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/livenessProbe"}]'
|
||||
|
||||
# 在带位置数组中添加元素
|
||||
kubectl patch sa default --type='json' -p='[{"op": "add", "path": "/secrets/1", "value": {"name": "whatever" } }]'
|
||||
```
|
||||
|
||||
### 编辑资源
|
||||
```
|
||||
kubectl edit svc/docker-registry # 编辑名为 docker-registry 的服务
|
||||
KUBE_EDITOR="nano" kubectl edit svc/docker-registry # 使用其他编辑器
|
||||
```
|
||||
|
||||
### 对资源进行伸缩
|
||||
```
|
||||
kubectl scale --replicas=3 rs/foo # 将名为 'foo' 的副本集伸缩到 3 副本
|
||||
kubectl scale --replicas=3 -f foo.yaml # 将在 "foo.yaml" 中的特定资源伸缩到 3 个副本
|
||||
kubectl scale --current-replicas=2 --replicas=3 deployment/mysql # 如果名为 mysql 的 Deployment 的副本当前是 2,那么将它伸缩到 3
|
||||
kubectl scale --replicas=5 rc/foo rc/bar rc/baz # 伸缩多个副本控制器
|
||||
```
|
||||
|
||||
### 删除资源
|
||||
```
|
||||
kubectl delete -f ./pod.json # 删除在 pod.json 中指定的类型和名称的 Pod
|
||||
kubectl delete pod,service baz foo # 删除名称为 "baz" 和 "foo" 的 Pod 和服务
|
||||
kubectl delete pods,services -l name=myLabel # 删除包含 name=myLabel 标签的 pods 和服务
|
||||
kubectl delete pods,services -l name=myLabel --include-uninitialized # 删除包含 label name=myLabel 标签的 Pods 和服务
|
||||
kubectl -n my-ns delete po,svc --all # 删除在 my-ns 名字空间中全部的 Pods 和服务
|
||||
# 删除所有与 pattern1 或 pattern2 awk 模式匹配的 Pods
|
||||
kubectl get pods -n mynamespace --no-headers=true | awk '/pattern1|pattern2/{print $1}' | xargs kubectl delete -n mynamespace pod
|
||||
```
|
||||
|
||||
### 与运行中的 Pods 进行交互
|
||||
```
|
||||
kubectl logs my-pod # 获取 pod 日志(标准输出)
|
||||
kubectl logs -l name=myLabel # 获取含 name=myLabel 标签的 Pods 的日志(标准输出)
|
||||
kubectl logs my-pod --previous # 获取上个容器实例的 pod 日志(标准输出)
|
||||
kubectl logs my-pod -c my-container # 获取 Pod 容器的日志(标准输出, 多容器场景)
|
||||
kubectl logs -l name=myLabel -c my-container # 获取含 name=myLabel 标签的 Pod 容器日志(标准输出, 多容器场景)
|
||||
kubectl logs my-pod -c my-container --previous # 获取 Pod 中某容器的上个实例的日志(标准输出, 多容器场景)
|
||||
kubectl logs -f my-pod # 流式输出 Pod 的日志(标准输出)
|
||||
kubectl logs -f my-pod -c my-container # 流式输出 Pod 容器的日志(标准输出, 多容器场景)
|
||||
kubectl logs -f -l name=myLabel --all-containers # 流式输出含 name=myLabel 标签的 Pod 的所有日志(标准输出)
|
||||
kubectl run -i --tty busybox --image=busybox -- sh # 以交互式 Shell 运行 Pod
|
||||
kubectl run nginx --image=nginx -n mynamespace # 在指定名字空间中运行 nginx Pod
|
||||
kubectl run nginx --image=nginx                              # 运行 nginx Pod 并将其规约写入到名为 pod.yaml 的文件
|
||||
--dry-run=client -o yaml > pod.yaml
|
||||
|
||||
kubectl attach my-pod -i # 挂接到一个运行的容器中
|
||||
kubectl port-forward my-pod 5000:6000 # 在本地计算机上侦听端口 5000 并转发到 my-pod 上的端口 6000
|
||||
kubectl exec my-pod -- ls / # 在已有的 Pod 中运行命令(单容器场景)
|
||||
kubectl exec my-pod -c my-container -- ls / # 在已有的 Pod 中运行命令(多容器场景)
|
||||
kubectl top pod POD_NAME --containers # 显示给定 Pod 和其中容器的监控数据
|
||||
```
|
||||
|
||||
### 与节点和集群进行交互
|
||||
```
|
||||
kubectl cordon my-node # 标记 my-node 节点为不可调度
|
||||
kubectl drain my-node # 对 my-node 节点进行清空操作,为节点维护做准备
|
||||
kubectl uncordon my-node # 标记 my-node 节点为可以调度
|
||||
kubectl top node my-node # 显示给定节点的度量值
|
||||
kubectl cluster-info # 显示主控节点和服务的地址
|
||||
kubectl cluster-info dump # 将当前集群状态转储到标准输出
|
||||
kubectl cluster-info dump --output-directory=/path/to/cluster-state # 将当前集群状态输出到 /path/to/cluster-state
|
||||
|
||||
# 如果已存在具有指定键和效果的污点,则替换其值为指定值
|
||||
kubectl taint nodes foo dedicated=special-user:NoSchedule
|
||||
```
|
||||
|
||||
### 资源类型
|
||||
```
|
||||
kubectl api-resources
|
||||
```
|
||||
用于探索 API 资源的其他操作
|
||||
```
|
||||
kubectl api-resources --namespaced=true # 所有命名空间作用域的资源
|
||||
kubectl api-resources --namespaced=false # 所有非命名空间作用域的资源
|
||||
kubectl api-resources -o name # 用简单格式列举所有资源(仅显示资源名称)
|
||||
kubectl api-resources -o wide # 用扩展格式列举所有资源(又称 "wide" 格式)
|
||||
kubectl api-resources --verbs=list,get # 支持 "list" 和 "get" 请求动词的所有资源
|
||||
kubectl api-resources --api-group=extensions # "extensions" API 组中的所有资源
|
||||
```
|
||||
|
||||
### 格式化输出
|
||||
|
||||
|
||||
|
||||
| 输出格式 | 描述 |
|
||||
| -------- | -------- |
|
||||
| -o=custom-columns=<spec> | 使用逗号分隔的自定义列来打印表格 |
|
||||
| -o=custom-columns-file=<filename> | 使用 <filename> 文件中的自定义列模板打印表格 |
|
||||
| -o=json | 输出 JSON 格式的 API 对象 |
|
||||
| -o=jsonpath=<template> | 打印 jsonpath 表达式中定义的字段 |
|
||||
| -o=jsonpath-file=<filename> | 打印在 <filename> 文件中定义的 jsonpath 表达式所指定的字段。 |
|
||||
| -o=name | 仅打印资源名称而不打印其他内容 |
|
||||
| -o=wide | 以纯文本格式输出额外信息,对于 Pod 来说,输出中包含了节点名称 |
|
||||
| -o=yaml | 输出 YAML 格式的 API 对象 |
|
||||
|
||||
|
||||
-----
|
||||
|
||||
|
||||
未完待续
|
||||
|
||||
|
BIN
CloudNative/Kubernetes/Base/kubernetes运行时安全策略.bmp
Normal file
After Width: | Height: | Size: 4 MiB |
BIN
CloudNative/Kubernetes/Base/pv延迟绑定.bmp
Normal file
After Width: | Height: | Size: 3.1 MiB |
25
CloudNative/Kubernetes/Base/为pod配置hosts.md
Normal file
|
@ -0,0 +1,25 @@
|
|||
```
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
...
|
||||
spec:
|
||||
hostAliases:
|
||||
- ip: "10.1.2.3"
|
||||
hostnames:
|
||||
- "foo.remote"
|
||||
- "bar.remote"
|
||||
...
|
||||
```
|
||||
启动pod后,/etc/hosts文件内容如下
|
||||
```
|
||||
|
||||
cat /etc/hosts
|
||||
# Kubernetes-managed hosts file.
|
||||
127.0.0.1 localhost
|
||||
...
|
||||
10.244.135.10 hostaliases-pod
|
||||
10.1.2.3 foo.remote
|
||||
10.1.2.3 bar.remote
|
||||
```
|
||||
需要指出的是,在 Kubernetes 项目中,如果要设置 hosts 文件里的内容,一定要通过这种方法。否则,如果直接修改了 hosts 文件的话,在 Pod 被删除重建之后,kubelet 会自动覆盖掉被修改的内容。
|
37
CloudNative/Kubernetes/Base/亲和性调度
Normal file
|
@ -0,0 +1,37 @@
|
|||
pod.spec.affinity.PodAffinity,pod亲和性
|
||||
必须和某些pod调度到一起:requiredDuringSchedulingIgnoredDuringExecution
|
||||
优先和某些pod调度到一起:preferredDuringSchedulingIgnoredDuringExecution
|
||||
|
||||
pod.spec.affinity.PodAntiAffinity,pod反亲和性
|
||||
禁止和某些pod调度到一起:requiredDuringSchedulingIgnoredDuringExecution
|
||||
优先不和某些pod调度到一起:preferredDuringSchedulingIgnoredDuringExecution
|
||||
|
||||
pod.spec.affinity.nodeAffinity,node亲和性
|
||||
必须调度到满足条件的节点:requiredDuringSchedulingIgnoredDuringExecution
|
||||
优先调度到满足条件的节点:preferredDuringSchedulingIgnoredDuringExecution
|
||||
|
||||
pod.spec.nodeSelector,强制调度到带有某些标签的node中
|
||||
|
||||
node加上taints(污点)
|
||||
一个node上可以有多个Taints
|
||||
spec.taints中的effect不能为空,有三个不同的级别
|
||||
NoSchedule: 禁止新的pod调度上来
|
||||
PreferNoSchedule: 新pod尽量不调度上来
|
||||
NoExecute: 驱逐(evict)没有对应toleration的pod,也不会调度新的上来
|
||||
|
||||
apiVersion: v1
|
||||
kind: Node
|
||||
metadata:
|
||||
name: demo-node
|
||||
spec:
|
||||
taints:
|
||||
- key: 'k1'
|
||||
value: 'v1'
|
||||
effect: 'NoSchedule'
|
||||
|
||||
pod加上tolerations(容忍)
|
||||
一个pod上可以有多个tolerations
|
||||
effect可以为空,匹配所有
|
||||
取值与taints的effect一致
|
||||
|
||||
operator: Exists/Equal
|
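与上面 node 的 taints 对应,pod 上 tolerations 的示意写法如下(key、value 沿用上例,均为演示值):
```
apiVersion: v1
kind: Pod
metadata:
  name: demo-pod
spec:
  tolerations:
  - key: 'k1'
    operator: 'Equal'     # Exists时可以不写value
    value: 'v1'
    effect: 'NoSchedule'  # 为空则匹配所有effect
  containers:
  - name: app
    image: nginx
```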
|
@ -0,0 +1,9 @@
|
|||
centos7修改/etc/sysconfig/kubelet文件
|
||||
```
|
||||
KUBELET_EXTRA_ARGS=--cgroup-driver=systemd
|
||||
```
|
||||
重启kubelet
|
||||
```
|
||||
systemctl daemon-reload
|
||||
systemctl restart kubelet
|
||||
```
|
102
CloudNative/Kubernetes/Base/外部访问集群内部应用.md
Normal file
|
@ -0,0 +1,102 @@
|
|||
### hostPort或hostNetwork
|
||||
#### hostNetwork
|
||||
和containers平级的hostNetwork: true,表示pod使用宿主机网络,配合nodeSelector,把pod实例化在固定节点
|
||||
```
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
name: nginx-deployment
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
nodeSelector: # node节点选择器
|
||||
role: master # node节点标签(Label)
|
||||
hostNetwork: true # 使用node节点网络
|
||||
containers:
|
||||
- image: nginx
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: nginx
|
||||
ports:
|
||||
```
|
||||
#### hostPort
|
||||
和hostNetwork相比多了映射能力,可以把容器端口映射为node节点不同端口,hostPort,当然也需要nodeSelector来固定节点,不然每次创建,节点不同,ip也会改变
|
||||
```
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
name: nginx-deployment
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
nodeSelector: # node节点选择器
|
||||
role: master # node节点标签(Label)
|
||||
containers:
|
||||
- image: nginx
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
hostPort: 80 #重点
|
||||
```
|
||||
### nodePort
|
||||
访问方式:集群内任意节点ip加nodePort所配端口号
|
||||
```
|
||||
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
name: nginx-deployment
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- image: nginx
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginx-pod-service
|
||||
labels:
|
||||
app: nginx #自身标签
|
||||
spec:
|
||||
type: NodePort # 类型ExternalName, ClusterIP, NodePort, and LoadBalancer
|
||||
ports:
|
||||
- port: 8080 # service在k8s集群内服务端口
|
||||
targetPort: 8080 # 关联pod对外开放端口
|
||||
nodePort: 30088 # 集群外访问端口,端口范围【30000-32767】
|
||||
selector:
|
||||
app: nginx # pod标签
|
||||
```
|
||||
### ingress
|
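一个指向上例 service 的最小 ingress 示意(host 为假定域名,service 名与端口沿用上例):
```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
  - host: nginx.test.com
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx-pod-service
          servicePort: 8080
```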
||||
### kubectl port-forward
|
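如果只是临时调试,也可以不暴露服务,直接用类似 `kubectl port-forward svc/nginx-pod-service 8080:8080` 的命令(服务名与端口沿用上例,均为假定值)把 service 转发到本地端口访问。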
BIN
CloudNative/Kubernetes/Base/存储快照例子.bmp
Normal file
After Width: | Height: | Size: 2 MiB |
BIN
CloudNative/Kubernetes/Base/容器安全策略.bmp
Normal file
After Width: | Height: | Size: 4.2 MiB |
7
CloudNative/Kubernetes/Base/容器隔离
Normal file
|
@ -0,0 +1,7 @@
|
|||
mount:只能看到容器的目录结构的视图,不能看到宿主机的
|
||||
uts:domain name,hostname隔离
|
||||
ipc:进程间通信隔离
|
||||
pid:保证初始化的进程id为1
|
||||
cgroup:容器中看到的cgroup视图时以/方式呈现的,为了安全
|
||||
network:网络隔离,host,container,none,bridge
|
||||
user:用户隔离
|
19
CloudNative/Kubernetes/Base/并行运行JOB.md
Normal file
|
@ -0,0 +1,19 @@
|
|||
```
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
|
||||
metadata:
|
||||
name: paral-1
|
||||
|
||||
spec:
|
||||
completions: 8 ## 本pod队列可以执行的次数,8表示这个任务被执行8次
|
||||
parallelism: 2 ## 并行执行的个数,2表示并行执行的pod的数量,即会有2个pod同时执行
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: param
|
||||
image: ubuntu
|
||||
command: ["bin/sh"]
|
||||
args: ["-c", "sleep 30;date"]
|
||||
restartPolicy: OnFailure
|
||||
```
|
BIN
CloudNative/Kubernetes/Base/应用故障排查.bmp
Normal file
After Width: | Height: | Size: 3.8 MiB |
19
CloudNative/Kubernetes/Base/更新维护node.md
Normal file
|
@ -0,0 +1,19 @@
|
|||
1、节点标记为不可调度
|
||||
```
|
||||
kubectl cordon node1
|
||||
```
|
||||
节点状态变为如下所示
|
||||
```
|
||||
# kubectl get nodes
|
||||
NAME STATUS AGE
|
||||
node1 Ready,SchedulingDisabled 1d
|
||||
node2 Ready 1d
|
||||
```
|
||||
2、驱逐node1上的pod
|
||||
```
|
||||
kubectl drain node1 --delete-local-data --force --ignore-daemonsets
|
||||
```
|
||||
3、恢复调度
|
||||
```
|
||||
kubectl uncordon node1
|
||||
```
|
1
CloudNative/Kubernetes/Base/查看kube-proxy模式
Normal file
|
@ -0,0 +1 @@
|
|||
curl localhost:10249/proxyMode
|
BIN
CloudNative/Kubernetes/Base/网络策略示例.bmp
Normal file
After Width: | Height: | Size: 3.4 MiB |
3
CloudNative/Kubernetes/Base/远程调试策略
Normal file
|
@ -0,0 +1,3 @@
|
|||
1、如果想把本地应用代理到远程集群,使用:telepresence这样的工具实现
|
||||
2、如果想把远程的应用代理到本地,在本地调用或调试,可以使用port-forward机制
|
||||
kubectl port-forward svc/app -n app-namespace
|
394
CloudNative/Kubernetes/Base/问题.md
Normal file
|
@ -0,0 +1,394 @@
|
|||
# docker网络模式
|
||||
Docker使用了Linux的Namespaces技术来进行资源隔离,如PID Namespace隔离进程,Mount Namespace隔离文件系统,Network Namespace隔离网络等。一个Network Namespace提供了一份独立的网络环境,包括网卡、路由、Iptable规则等都与其他的Network Namespace隔离。一个Docker容器一般会分配一个独立的Network Namespace
|
||||
### host模式,使用--net=host指定
|
||||
容器将不会获得一个独立的Network Namespace,而是和宿主机共用一个Network Namespace。容器将不会虚拟出自己的网卡,配置自己的IP等,而是使用宿主机的IP和端口.文件系统等隔离,但网络不隔离。
|
||||
|
||||
### container模式,使用--net=container:NAME_or_ID指定
|
||||
新创建的容器和已经存在的一个容器共享一个Network Namespace,而不是和宿主机共享。新创建的容器不会创建自己的网卡,配置自己的IP,而是和一个指定的容器共享IP、端口范围等。同样,两个容器除了网络方面,其他的如文件系统、进程列表等还是隔离的。两个容器的进程可以通过lo网卡设备通信。
|
||||
|
||||
### none模式,使用--net=none指定
|
||||
Docker容器拥有自己的Network Namespace,但是,并不为Docker容器进行任何网络配置。也就是说,这个Docker容器没有网卡、IP、路由等信息。需要我们自己为Docker容器添加网卡、配置IP等
|
||||
|
||||
### bridge模式,使用--net=bridge指定。默认模式
|
||||
为每一个容器分配、设置IP等,并将容器连接到一个docker0虚拟网桥,通过docker0网桥以及Iptables nat表配置与宿主机通信
|
||||
|
||||
|
||||
# docker swarm与k8s的区别
|
||||
swarm是docker公司开发的集群编排工具,k8s是google开发的运用十年的borg的开源版本
|
||||
1、docker swarm架构简单,部署成本低,但是集群健壮性比较差,k8s部署比较复杂,健壮性比较强
|
||||
2、swarm与k8s都有很好的扩展性,但swarm扩展速度更快,k8s扩展相对较慢
|
||||
3、swarm可以在不同容器中自动实现负载均衡,k8s需要手动在不同pod的不同容器间配置负载
|
||||
4、都可以滚动更新,但是swarm不能自动回滚
|
||||
5、k8s数据卷只能同一个pod中不同容器共享,但是swarm可以与其它容器共享
|
||||
|
||||
|
||||
# 如何在 Kubernetes 中实现负载均衡?
|
||||
kube-proxy实现4层负载均衡(ipvs哟很多调度算法),负责把service请求转发到后台的pod
|
||||
ingress实现7层负载均衡,负责把内部服务暴露到外网访问
|
||||
|
||||
# 在生产中,你如何实现 Kubernetes 自动化?
|
||||
### 日志
|
||||
使用efk、promethuse等日志收集与监控套件,过滤与标记异常
|
||||
### 自我修复
|
||||
k8s定期检测pod与容器的健康状况,立即采取措施处理,pod状态(podstatus和containerstatus)。容器探针(readinessProbe和livenessProbe),readiness探针很重要,容器可能是运行状态,但是未就绪,并不传送任何流量
|
||||
### 弹性测试
|
||||
需要使用贪心测试工具kube-monkey等(**不懂**)
|
||||
### 例行审计
|
||||
需要工具
|
||||
### 自动扩展
|
||||
扩展pod,扩展节点(node)
|
||||
扩展pod主要看heapster的度量标准执行,确认是否需要创建新pod。hpa(horizontal pod autoscaler)自动扩展
|
||||
扩展node需要iaas支持
|
||||
|
||||
### 资源配额
|
||||
限制k8s中的namespace,确保某个应用程序不会占用所有资源
|
||||
|
||||
### 容器资源约束
|
||||
限制容器的内存与cpu等
|
||||
|
||||
|
||||
# 你如何扩展 Kubernetes 集群?
|
||||
|
||||
---
|
||||
|
||||
# 你能解释 Deployment、ReplicaSet、DaemonSet、StatefulSet、Pod、Job、CronJob 的不同用途吗?
|
||||
Deployment、ReplicaSet等对应的服务是service,而statefulSet使用的是headless service。无头服务(headless serviece)与service不同的地方是无头服务没有设置clusterIP,访问它的时候,直接访问的是无头服务后面对应的pod
|
||||
### Deployment
|
||||
Deployment为Pod和Replica Set提供声明式更新。
|
||||
只需要在 Deployment 中描述想要的目标状态是什么,Deployment controller 就会帮您将 Pod 和ReplicaSet 的实际状态改变到您的目标状态。您可以定义一个全新的 Deployment 来创建 ReplicaSet 或者删除已有的 Deployment 并创建一个新的来替换
|
||||
|
||||
Deployment实际上是通过ReplicaSet实现滚动更新
|
||||
Deployment控制ReplicaSet,ReplicaSet控制Pod
|
||||
|
||||
### ReplicaSet
|
||||
ReplicaSet是为了替换ReplicationController,rc只支持等式的selector,如version=v1等,但rs支持集合形式的selector,如version in (v1,v2),或version not in (v1,v2)
|
||||
|
||||
### DaemonSet
|
||||
DaemonSet 确保全部(或者一些)Node 上运行一个 Pod 的副本。当有 Node 加入集群时,也会为他们新增一个 Pod 。当有 Node 从集群移除时,这些 Pod 也会被回收。删除 DaemonSet 将会删除它创建的所有 Pod
|
||||
典型用途:
|
||||
运行集群存储 daemon,例如在每个 Node 上运行 glusterd、ceph。
|
||||
日志收集,比如fluentd,logstash等
|
||||
系统监控,比如Prometheus Node Exporter,collectd,New Relic agent,Ganglia gmond等
|
||||
系统程序,比如kube-proxy, kube-dns, glusterd, ceph等
|
||||
|
||||
### StatefulSet
|
||||
面向有状态的服务,管理的pod有固定的名称、起停顺序等,还要保持数据一致性,需要用到持久化存储(PersistentVolumeClaim)
|
||||
|
||||
### Pod
|
||||
pod是k8s集群管理中的最小单位.是一个或多个容器的集合。同一个pod中的容器共享网络,存储、命名空间等
|
||||
|
||||
### Job
|
||||
用于批处理一次性任务,并保证一个或多个任务成功结束
|
||||
|
||||
### CronJob
|
||||
在Job基础上,加上时间调度,执行周期性任务
|
||||
|
||||
# Kubernetes 如何处理持久性?
|
||||
pv和pvc。pv是集群中的一块存储空间,由集群管理员或者存储类(Storage Class)管理,pv是一个资源对象
|
||||
pvc(PersistentVolueClaim)代表应用使用存储的请求,pvc通过与pv绑定使用。满足的条件是pv与pvc spec字段匹配,例如pvc申请的字段必须要小于等于pv大小,pv与pvc的StorageClassName必须一样
|
||||
# service和 ingress 的作用是什么?
|
||||
负载均衡,提供k8s集群外部访问的入口。ingres接收到来自外部的请求,会根据规则转发到service,service后有多个pod,转发一个pod中,这个pod处理请求
|
||||
Service是通过规则定义出由多个Pod对象组合而成的逻辑组合以及访问这组Pod的策略。Service资源为此类Pod对象提供了一个固定、统一的访问入口及负载均衡的能力,并支持借助于新一代DNS系统的服务发现功能,解决客户端发现并访问容器化应用的难题
|
||||
service并不是直接与pod连接,service与pod之间还有一个中间层-endpoints资源对象,它是由ip与端口组成的列表,一般情况下,service创建以后,关联的endpoints也会自动创建
|
||||
|
||||
# 你何时会使用像 ConfigMap 或 secret 这样的东西?
|
||||
configmap 核心作用是让配置信息和镜像解耦,pod使用configMap中的配置信息,如果后端pod配置文件有变化,只需要修改configMap就好了,pod会动态改变配置信息
|
||||
ConfigMap对像是一系列配置的集合,k8s会将这一集合注入到对应的Pod中,并为容器成功启动使用。注入的方式一般有两种,一种是挂载存储卷,一种是传递变量。ConfigMap被引用之前必须存在,属于名称空间级别,不能跨名称空间使用,内容明文显示。ConfigMap内容修改后,对应的pod必须重启或者重新加载配置。
|
||||
|
||||
secret与configmap类似,存储的信息使用base64编码,一般用于docker-registry(docker仓库使用),tls(证书,比如说https证书)和一般generic
|
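ConfigMap 的一个示意写法(名称与键值均为假定的演示值),同一个 ConfigMap 既可以作为环境变量注入,也可以挂载为文件:
```
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config
data:
  log.level: info
---
apiVersion: v1
kind: Pod
metadata:
  name: app-demo
spec:
  containers:
  - name: app
    image: nginx
    env:                          # 方式一:作为环境变量注入
    - name: LOG_LEVEL
      valueFrom:
        configMapKeyRef:
          name: app-config
          key: log.level
    volumeMounts:                 # 方式二:挂载为文件
    - name: config
      mountPath: /etc/config
  volumes:
  - name: config
    configMap:
      name: app-config
```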
||||
|
||||
# Pod 亲和性作用是什么?
|
||||
pod的亲和性主要用来解决pod可以和哪些pod部署在同一个集群里面,即拓扑域(由node组成的集群)里面;而pod的反亲和性是为了解决pod不能和哪些pod部署在一起的问题,二者都是为了解决pod之间部署问题。需要注意的是,Pod 间亲和与反亲和需要大量的处理,这可能会显著减慢大规模集群中的调度,不建议在具有几百个节点的集群中使用,而且Pod 反亲和需要对节点进行一致的标记,即集群中的每个节点必须具有适当的标签能够匹配 topologyKey。如果某些或所有节点缺少指定的 topologyKey 标签,可能会导致意外行为
|
||||
Pod亲和性调度需要各相关的Pod对象运行于“同一位置”, 而反亲和性调度则要求它们不能运行于“同一位置” 。同一位置取决于节点的位置拓扑, 拓扑的方式不同
|
||||
如果以基于各节点的kubernetes.io/hostname标签作为评判标准,那么很显然,“同一位置” 意味着同一个节点,不同节点即不同的位置, 如图所示
|
||||
如果是基于所划分的故障转移域来进行评判,同一位置, 而server2和server3属于另一个意义上的同一位置
|
||||
因此,在定义Pod对象的亲和性与反亲和性时,需要借助于标签选择器来选择被依赖的Pod对象,并根据选出的Pod对象所在节点的标签来判定“同一位置”的具体意义
|
||||
|
||||
# 你能举例说明何时使用 Init Container 么?
|
||||
初始化容器,顾名思义容器启动的时候,会先启动可一个或多个容器,如果有多个,那么这几个Init Container按照定义的顺序依次执行,一个执行成功,才能执行下一个,只有所有的Init Container执行完后,主容器才会启动。由于一个Pod里的存储卷是共享的,所以Init Container里产生的数据可以被主容器使用到。
|
||||
|
||||
Init Container可以在多种K8S资源里被使用到如Deployment、Daemon Set、StatefulSet、Job等,但归根结底都是在Pod启动时,在主容器启动前执行,做初始化工作。
|
||||
|
||||
Init 容器支持应用容器的全部字段和特性,包括资源限制、数据卷和安全设置。然而,Init 容器不支持 Readiness Probe,因为它们必须在 Pod 就绪之前运行完成;在资源限制、调度方面也会略有不同。
|
||||
|
||||
|
||||
**等待其它模块Ready**:比如有一个应用里面有两个容器化的服务,一个是Web Server,另一个是数据库。其中Web Server需要访问数据库。但是当我们启动这个应用的时候,并不能保证数据库服务先启动起来,所以可能出现在一段时间内Web Server连接数据库错误。为了解决这个问题,我们可以在运行Web Server服务的Pod里使用一个InitContainer,去检查数据库是否准备好,直到数据库可以连接,Init Container才结束退出,然后Web Server容器被启动,发起正式的数据库连接请求。
|
||||
**初始化配置**:比如集群里检测所有已经存在的成员节点,为主容器准备好集群的配置信息,这样主容器起来后就能用这个配置信息加入集群;目前在容器化,初始化集群配置文件时经常用到;
|
||||
**提供一种阻塞容器启动的方式**:必须在initContainer容器启动成功后,才会运行下一个容器,保证了一组条件运行成功的方式;
|
||||
**其它使用场景**:将pod注册到一个中央数据库、下载应用依赖等。
|
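以"等待其它模块Ready"为例,一个示意的 initContainer 写法如下(db-service 等名称均为假定值):
```
apiVersion: v1
kind: Pod
metadata:
  name: web-demo
spec:
  initContainers:
  - name: wait-for-db             # 等待数据库service的DNS可以解析后,主容器才启动
    image: busybox
    command: ["sh", "-c", "until nslookup db-service; do echo waiting for db; sleep 2; done"]
  containers:
  - name: web
    image: nginx
```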
||||
|
||||
# 什么是 sidecar 容器?你能给出一个用例,说明你为什么要使用它么?
|
||||
|
||||
# 在构建和管理生产集群时遇到的主要问题是什么?
|
||||
# 为什么你会建议公司在云中构建自己的 K8S 集群而不是使用托管服务?
|
||||
# 什么是 Istio 和 Linkerd?
|
||||
Istio和Linkerd都支持以主流的外挂(Sidecar)模式部署。在这种模式下,每个微服务都被分配一个单独的代理。微服务间的通信并不直接进行,而是通过自身的代理转发。代理会将请求路由到目标微服务的代理,该代理再将请求转发到目标微服务。所有这些服务代理构成了数据层。在服务网格的架构下,数据层由控制层(control plane)来进行配置和监控,控制层一般另行独立部署。
|
||||
Istio 提供一种简单的方式来为已部署的服务建立网络,该网络具有负载均衡、服务间认证、监控等功能,而不需要对服务的代码做任何改动。
|
||||
|
||||
# 什么是 Kubernetes Operator?
|
||||
operator旨在简化负载的有状态应用管理的框架。是一个用于感知应用状态的控制器,通过扩展k8s api来创建、管理配置应用
|
||||
operator通过扩展k8s定义custom controllor,观察情况并根据运行情况自定义任务。管理自定义cr(custom resource)
|
||||
Operator是一个感知应用状态的控制器,所以实现一个Operator最关键的就是把管理应用状态的所有操作封装到配置资源和控制器中。通常来说Operator需要包括以下功能:
|
||||
|
||||
Operator自身以deployment的方式部署
|
||||
Operator自动创建一个Third Party Resources资源类型,用户可以用该类型创建应用实例
|
||||
Operator应该利用Kubernetes内置的Serivce/ReplicaSet等管理应用
|
||||
Operator应该向后兼容,并且在Operator自身退出或删除时不影响应用的状态
|
||||
|
||||
Operator应该支持应用版本更新
|
||||
|
||||
Operator应该测试Pod失效、配置错误、网络错误等异常情况
|
||||
|
||||
# kubernetes包含几个组件。 各个组件的功能是什么。组件之间是如何交互的。
|
||||
c/s架构
|
||||
### kube-controller-manager
|
||||
负责维护集群的状态,比如故障检测、自动扩展、滚动更新等
|
||||
|
||||
### kube-scheduler
|
||||
负责资源的调度,按照预定的调度策略将Pod调度到相应的机器上
|
||||
|
||||
### etcd
|
||||
etcd 是兼具一致性和高可用性的键值数据库,可以作为保存 Kubernetes 所有集群数据的后台数据库。
|
||||
存储集群状态等
|
||||
|
||||
### kube-apiserver
|
||||
外部管理k8s集群的唯一入口,并提供认证、授权、访问控制、API注册和发现等机制。通过apiserver完成对k8s集群管理交互,如pod的增删改查等
|
||||
|
||||
### kubectl
|
||||
k8s集群的命令行工具,使用该工具与apiserver交互,完成对k8s的管理
|
||||
|
||||
### kubelet
|
||||
负责维护容器的生命周期,同时也负责Volume(CVI)和网络(CNI)的管理
|
||||
|
||||
### kube-proxy
|
||||
负责为Service提供cluster内部的服务发现和负载均衡
|
||||
|
||||
### ingress-controller
|
||||
为外部提供集群内部应用访问入口
|
||||
|
||||
### dns
|
||||
负责为整个集群提供DNS服务
|
||||
|
||||
### heapter
|
||||
监控
|
||||
|
||||
### dashboard
|
||||
gui
|
||||
|
||||
# k8s的pause容器有什么用。是否可以去掉。
|
||||
pause是k8s基础容器,很稳定。pod中的所有容器都与pause 共享namespace。pid=1负责处理僵尸进程。
|
||||
# k8s中的pod内几个容器之间的关系是什么。
|
||||
共享各种namespace,通过localhost通信
|
||||
# 一个经典pod的完整生命周期。
|
||||
初始化容器 initc
|
||||
启动后
|
||||
存活探针livenessProbe
|
||||
就绪探针readinessProbe
|
||||
结束前
|
||||
pod创建过程总可能有pending(创建但未调度完成),running(容器被创建,至少一个容器正常运行),succeeded(所有容器都正常停止),failed(至少一个容器非正常退出),unknown(未知原因无法知道pod状态)状态
|
||||
# k8s的service和endpoint是如何关联和相互影响的。
|
||||
service与pod关联,需要通过endpoint。endpoint是在service创建的时候,由k8s自动创建
|
||||
service要动态感知后端IP的变化,得介入一个endpoints控制器,也就是每个service都有对应一个endpoints控制器,endpoints帮它关联后端的pod,service 通过selector标签选择器关联pod, 具体实现动态IP变化由endpoints来实现
|
||||
|
||||
# 详述kube-proxy原理, 一个请求是如何经过层层转发落到某个pod上的整个过程。请求可能来自pod也可能来自外部。
|
||||
|
||||
# deployment/rs有什么区别。 其使用方式使用条件和原理是什么。
|
||||
# cgroup中的cpu有哪几种限制方式。 k8s是如何使用实现request和limit的。
|
||||
|
||||
# rc/rs 功能是怎么实现的。详述从 API 接收到一个创建 rc/rs 的请求,到最终在节点上创建 pod 的全过程,尽可能详细。另外,当一个 pod 失效时,kubernetes 是如何发现并重启另一个 pod 的?
|
||||
|
||||
# 设想一个一千台物理机,上万规模的容器的 kubernetes 集群,请详述使用 kubernetes 时需要注意哪些问题?应该怎样解决?(提示可以从高可用,高性能等方向,覆盖到从镜像中心到 kubernetes 各个组件等)
|
||||
# 设想 kubernetes 集群管理从一千台节点到五千台节点,可能会遇到什么样的瓶颈。应该如何解决。
|
||||
# kubernetes 的运营中有哪些注意的要点。
|
||||
# 集群发生雪崩的条件,以及预防手段。
|
||||
# 设计一种可以替代 kube-proxy 的实现。
|
||||
# sidecar 的设计模式如何在 k8s 中进行应用。有什么意义。
|
||||
# 灰度发布是什么。如何使用 k8s 现有的资源实现灰度发布。
|
||||
# 介绍 k8s 实践中踩过的比较大的一个坑和解决方式。
|
||||
# 什么是k8s
|
||||
Kubernetes是一个开源容器管理工具,负责容器部署,容器扩缩容以及负载平衡。作为Google的创意之作,它提供了出色的社区,并与所有云提供商合作。因此,我们可以说Kubernetes不是一个容器化平台,而是一个多容器管理解决方案
|
||||
|
||||
# docker与k8s的关系
|
||||
docker是一种容器,提供容器生命周期管理,但是本身不具备自愈,负载均衡的功能,而且不同容器(包括不同主机容器),若要互相通信,需要比较繁琐的配置
|
||||
|
||||
k8s专门用于容器编排组织,提供docker 缺少的功能
|
||||
|
||||
# 主机和容器上部署应用的区别
|
||||
主机上部署的应用共享系统的资源,静态库等,更新与配置比较麻烦,在新系统上部署的话,还需要配置环境,很容易出问题。
|
||||
容器部署应用,各应用资源隔离,应用本身可以快速迭代开发,一次部署,可以导出在其他地方运行,节约了环境配置时间。
|
||||
|
||||
# 什么是容器编排
|
||||
组成应用的一组容器,需要组织起来,协同合作,使应用按照既定的设计运行,这种组织流程就是容器编排
|
||||
|
||||
# 容器编排需要什么能力
|
||||
1、负载均衡
|
||||
2、自动伸缩
|
||||
3、故障转移
|
||||
4、监控状态
|
||||
5、自动恢复(自愈)
|
||||
6、回滚
|
||||
7、自动调度
|
||||
|
||||
# k8s特点
|
||||
1、自动调度
|
||||
2、自愈
|
||||
3、自动更新与回滚
|
||||
4、自动伸缩
|
||||
5、负载均衡
|
||||
|
||||
# k8s如何简化容器化部署
|
||||
k8s可以在不同主机,不同机房或云提供商上部署,需要负载均衡,网络配置,自动伸缩,监控等
|
||||
|
||||
# 对k8s集群了解多少
|
||||
# 什么是heapster
|
||||
Heapster是容器集群监控和性能分析工具,收集监控数据。heapster首先从master获取所有的node信息,然后通过这些node上的kubelet获取有用数据,而kubelet的数据从cAdvisor得到,所有获取到的数据都被推送到heapster配置的后端存储中。
|
||||
# 什么是minikube
|
||||
单节点运行的k8s
|
||||
|
||||
# 什么是kubelet
|
||||
负责维护容器的生命周期,同时也负责Volume(CVI)和网络(CNI)的管理
|
||||
监控pod状态并通知给其他组件
|
||||
监控node并汇报给master
|
||||
负责为pod准备运行环境
|
||||
周期性执行pod中定义的指针
|
||||
|
||||
# 什么是kubectl
|
||||
k8s集群的命令行工具。可用通过kubectl完成对集群的管理(最后通过api-server)
|
||||
|
||||
# 对k8s一个节点有什么了解
|
||||
1、可以是物理机或虚拟机
|
||||
2、提供pod的运行环境
|
||||
3、被master管理
|
||||
4、需要安装容器、kubelet、kube-proxy
|
||||
|
||||
# Kubernetes Architecture的不同组件有哪些
|
||||
|
||||
# 你对Kube-proxy有什么了解
|
||||
在k8s每个节点上运行,完成service与内部pod之间的数据转发,提供4层负载均衡
|
||||
|
||||
# 您能否介绍一下Kubernetes中主节点的工作情况
|
||||
# kube-apiserver和kube-scheduler的作用是什么
|
||||
kube-apiserver是外部管理集群的唯一入口,完成认证、授权、访问控制,api注册和发现等机制
|
||||
|
||||
kube-scheduler负责资源调度,安装预定的调度规则,把pod调度到相应的机器上
|
||||
|
||||
# 你能简要介绍一下Kubernetes控制管理器吗?
|
||||
负责维护集群状态,Kubernetes 自带的控制器例子包括副本控制器、节点控制器、命名空间控制器和服务账号控制器等
|
||||
# 什么是ETCD
|
||||
兼具一致性与高可用的键值数据库,用于存储k8s的集群状态
|
||||
# Kubernetes有哪些不同类型的service
|
||||
ClusterIP, NodePort,Load Balancer,External Name(外部Cname)
|
||||
# 你对Kubernetes的负载均衡器有什么了解
|
||||
两种分法,内部与外部,4层(kube-proxy)与7层(ingress)
|
||||
# 什么是Ingress网络,它是如何工作的
|
||||
Ingress定义了一组规则,是外部访问集群内部服务的入口,提供ssl,负载均衡等服务
|
||||
# 您对云控制器管理器有何了解
|
||||
# 什么是Container资源监控
|
||||
用户需要了解容器的性能及资源使用情况。就需要对容器,pod,服务,集群等各层级进行管理。heapster,influxdb
|
||||
|
||||
# Replica Set 和 Replication Controller之间有什么区别
|
||||
基本相同,但是rs可以支持集合的selector,rc支持基于等式的selector
|
||||
|
||||
# 什么是Headless Service
|
||||
与普通service不同的地方在于,没有ClusterIP,通过它直接访问后端的pod,中间没有代理,一般用于有状态服务
|
||||
# 使用Kubernetes时可以采取哪些最佳安全措施
|
||||
# 什么是集群联邦
|
||||
多个集群当成一个集群进行管理
|
||||
# cni
|
||||
container network interface,容器网络接口,是一个标准的,通用的接口。现在容器平台:docker,kubernetes,mesos,容器网络解决方案:flannel,calico,weave。只要提供一个标准的接口,就能为同样满足该协议的所有容器平台提供网络功能,而CNI正是这样的一个标准接口协议。
|
||||
CNI用于连接容器管理系统和网络插件。提供一个容器所在的network namespace,将network interface插入该network namespace中(比如veth的一端),并且在宿主机做一些必要的配置(例如将veth的另一端加入bridge中),最后对namespace中的interface进行IP和路由的配置。
|
||||
|
||||
CNI的工作是从容器管理系统处获取运行时信息,包括network namespace的路径,容器ID以及network interface name,再从容器网络的配置文件中加载网络配置信息,再将这些信息传递给对应的插件,由插件进行具体的网络配置工作,并将配置的结果再返回到容器管理系统中。
|
||||
# runc
|
||||
RunC 是一个轻量级的工具,它是用来运行容器的,只用来做这一件事,并且这一件事要做好。我们可以认为它就是个命令行小工具,可以不用通过 docker 引擎,直接运行容器。事实上,runC 是标准化的产物,它根据 OCI 标准来创建和运行容器
|
||||
Docker就是基于runC创建的,简单地说,runC是Docker中最为核心的部分,容器的创建,运行,销毁等等操作最终都将通过调用runC完成
|
||||
|
||||
# k8s控制器类型
|
||||
rs,deployment,daemonSet,statefulSet,service,pod,job,cronjob
|
||||
# k8s调度过程
|
||||
# 为什么要用systemd替换cgroup
|
||||
控制组用来约束分配给进程的资源。
|
||||
|
||||
当某个 Linux 系统发行版使用 systemd 作为其初始化系统时,初始化进程会生成并使用一个 root 控制组 (cgroup), 并充当 cgroup 管理器。 Systemd 与 cgroup 集成紧密,并将为每个 systemd 单元分配一个 cgroup。 你也可以配置容器运行时和 kubelet 使用 cgroupfs。 连同 systemd 一起使用 cgroupfs 意味着将有两个不同的 cgroup 管理器。
|
||||
|
||||
单个 cgroup 管理器将简化分配资源的视图,并且默认情况下将对可用资源和使用中的资源具有更一致的视图。 当有两个管理器共存于一个系统中时,最终将对这些资源产生两种视图。 在此领域人们已经报告过一些案例,某些节点配置让 kubelet 和 docker 使用 cgroupfs,而节点上运行的其余进程则使用 systemd; 这类节点在资源压力下会变得不稳定。
|
||||
|
||||
更改设置,令容器运行时和 kubelet 使用 systemd 作为 cgroup 驱动,以此使系统更为稳定。 对于 Docker, 设置 native.cgroupdriver=systemd 选项。
|
||||
|
||||
注意:非常 不 建议更改已加入集群的节点的 cgroup 驱动。 如果 kubelet 已经使用某 cgroup 驱动的语义创建了 pod,更改运行时以使用别的 cgroup 驱动,当为现有 Pods 重新创建 PodSandbox 时会产生错误。重启 kubelet 也可能无法解决此类问题。 如果你有切实可行的自动化方案,使用其他已更新配置的节点来替换该节点,或者使用自动化方案来重新安装
|
||||
|
||||
# k8s网络插件
|
||||
flannel,calico,cannel
|
||||
|
||||
# k8s三种认证方式
|
||||
token方式:比如新增node节点
|
||||
证书认证
|
||||
serviceaccount
|
||||
|
||||
# etcd备份恢复
|
||||
# pod,rs,deployment,svc,ingress如何关联
|
||||
|
||||
# Using affinity and anti-affinity

By default the scheduler prefers nodes with enough free resources and a balanced load. Sometimes that is not what you want: an internal code server, for example, should not share a node with other application services, while two Pods that talk to each other frequently should preferably land on the same node. That is what affinity and anti-affinity are for.

Node affinity constrains which nodes a Pod may be scheduled onto. There are two forms, requiredDuringSchedulingIgnoredDuringExecution and preferredDuringSchedulingIgnoredDuringExecution: the first is a hard requirement (if it is not satisfied the Pod is not scheduled), the second is a preference (if it cannot be satisfied the Pod may still be scheduled onto a non-matching node). IgnoredDuringExecution means that if the node's labels change while the Pod is running and the affinity rule is no longer satisfied, the Pod keeps running; see the sketch below.
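A minimal sketch of a hard node-affinity rule; the disktype=ssd label is an assumption:

```
apiVersion: v1
kind: Pod
metadata:
  name: with-node-affinity
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: disktype
            operator: In
            values:
            - ssd
  containers:
  - name: web
    image: nginx
```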
Pod affinity lets the scheduling decision depend on the labels of Pods that are already running. In words: "if node X is already running one or more Pods that satisfy condition Y, then this Pod should run on node X". Nodes have no namespaces but Pods do, so the administrator can specify which namespaces the affinity policy applies to. The scope is given by topologyKey: a topology can be a node, a rack, a data centre or a region (such as North America or Asia), and in practice it still maps to a label on the node.

There are two types:

requiredDuringSchedulingIgnoredDuringExecution, a hard requirement that must match exactly.

preferredDuringSchedulingIgnoredDuringExecution, a soft preference.
An example of pod anti-affinity: if a node already runs a Pod carrying the same label, do not place the new Pod there. Spreading replicas this way prevents a single node failure from making the service unavailable (a sketch follows below).
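A sketch of pod anti-affinity that spreads replicas of one application across nodes; the app=web label and the topologyKey are assumptions:

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app: web
            topologyKey: kubernetes.io/hostname   # at most one such Pod per node
      containers:
      - name: web
        image: nginx
```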
Node anti-affinity means keeping an application away from certain nodes. For example, an application that generates a lot of traffic should not be scheduled onto nodes with a small network uplink that clearly cannot handle it.
# Taints and tolerations, and when to use them

When a node is tainted, the Kubernetes scheduler will not place a Pod on it unless the Pod is marked as tolerating that taint.

Taints and tolerations work together to keep Pods off nodes that are not suitable for them. One or more taints can be applied to a node, which means the node will not accept any Pod that does not tolerate those taints. Applying a toleration to a Pod means the Pod may (but is not required to) be scheduled onto nodes carrying the matching taint.
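A sketch of a toleration matching a taint applied with, for example, kubectl taint nodes node1 dedicated=gpu:NoSchedule; the node name, key and value are assumptions:

```
apiVersion: v1
kind: Pod
metadata:
  name: gpu-job
spec:
  tolerations:
  - key: "dedicated"
    operator: "Equal"
    value: "gpu"
    effect: "NoSchedule"
  containers:
  - name: job
    image: busybox
    command: ["sleep", "3600"]
```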
# PV and PVC

# The three flannel modes and how they work

host-gw

vxlan



udp (the default is udp)


# How do encapsulation and decapsulation work

# Differences between vxlan and host-gw

# The three Calico modes and how they work

# Calico routing mode

# How is traffic routed across subnets

# How to manage multiple clusters and multiple tenants
# How do you understand CRDs

Custom Resource Definition: a way to extend the Kubernetes API with user-defined resource types.
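A minimal CRD sketch; the group, names and schema below are assumptions for illustration:

```
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: crontabs.example.com     # must be <plural>.<group>
spec:
  group: example.com
  scope: Namespaced
  names:
    kind: CronTab
    plural: crontabs
    singular: crontab
  versions:
  - name: v1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            type: object
            properties:
              cronSpec:
                type: string
              image:
                type: string
```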
# How do you understand operators

# Service mesh

# istio, OSM, nginx-service-mesh
# Helm

Similar to yum: charts that someone else has already packaged, ready to be installed.
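A sketch of installing a packaged chart; the repository, chart and release names are assumptions:

```
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm install my-nginx bitnami/nginx
helm list
```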
# traefik

# Harbor

A local (private) image registry.
# How does Prometheus monitor Kubernetes

# Pros and cons of EFK log collection

# Jenkins pipelines

# SonarQube
19
CloudNative/Kubernetes/Base/集群切换.md
Normal file
|
@ -0,0 +1,19 @@
|
|||
### View the current working context

```
kubectl config view
```

### Set the current cluster (context)

```
kubectl config set current-context cluster-name
```

### List clusters (contexts)

```
kubectl config get-contexts
```

### Switch cluster (context)

```
kubectl config use-context cluster-name
```
|
61
CloudNative/Kubernetes/Base/静态pod.md
Normal file
|
@ -0,0 +1,61 @@
|
|||
A static Pod is managed directly by the kubelet daemon on a specific node, without the API server watching over it. Unlike Pods managed by the control plane (for example via a Deployment), the kubelet itself watches each static Pod and restarts it if it crashes.

A static Pod is always bound to the kubelet on one specific node.

The kubelet automatically tries to create a mirror Pod on the Kubernetes API server for each static Pod. This means static Pods running on a node are visible through the API server, but cannot be controlled from there.

A static Pod can be configured from a file on the node's filesystem or from a file served over the web.
### Static Pod manifests on the filesystem

Manifests are standard Pod definition files in JSON or YAML, stored in a designated directory. The path is set with the staticPodPath: <directory> field of the kubelet configuration file; the kubelet periodically scans this directory and creates or removes static Pods as YAML/JSON files appear or disappear there. Note that the kubelet ignores files whose names start with a dot.

1. Choose the node that should run the static Pod.
2. Choose a directory, for example /etc/kubelet.d, and create the YAML manifest there. The manifest has the same format as any other Pod definition:
```
apiVersion: v1
kind: Pod
metadata:
  name: static-web
  labels:
    role: myrole
spec:
  containers:
    - name: web
      image: nginx
      ports:
        - name: web
          containerPort: 80
          protocol: TCP
```
3. Configure the kubelet on that node to run with --pod-manifest-path=/etc/kubelet.d/:

```
KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --pod-manifest-path=/etc/kubelet.d/"
```

Alternatively, add the staticPodPath: <directory> field to the kubelet configuration file.

4. Restart the kubelet.
### Static Pod manifests from the web

The kubelet periodically downloads the file specified by the --manifest-url=<URL> argument and interprets it as a JSON/YAML Pod definition file. It works much like the filesystem-based manifests: the kubelet re-fetches the manifest on a schedule and applies any changes to the static Pod.

1. Create a YAML file, store it on a web server, and give the kubelet its URL.
```
apiVersion: v1
kind: Pod
metadata:
  name: static-web
  labels:
    role: myrole
spec:
  containers:
    - name: web
      image: nginx
      ports:
        - name: web
          containerPort: 80
          protocol: TCP
```
2. Configure the kubelet on the chosen node to run with --manifest-url=<manifest-url>:

```
KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --manifest-url=<manifest-url>"
```

3. Restart the kubelet.
|
165
CloudNative/Kubernetes/Docs/access/harbor.conf
Normal file
|
@ -0,0 +1,165 @@
|
|||
# Configuration file of Harbor
|
||||
|
||||
# The IP address or hostname to access admin UI and registry service.
|
||||
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname: 172.19.0.50
|
||||
|
||||
# http related config
|
||||
http:
|
||||
# port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
port: 9080
|
||||
|
||||
# https related config
|
||||
#https:
|
||||
# # https port for harbor, default is 443
|
||||
# port: 443
|
||||
# # The path of cert and key files for nginx
|
||||
# certificate: /your/certificate/path
|
||||
# private_key: /your/private/key/path
|
||||
|
||||
# Uncomment external_url if you want to enable external proxy
|
||||
# And when it enabled the hostname will no longer used
|
||||
# external_url: https://reg.mydomain.com:8433
|
||||
|
||||
# The initial password of Harbor admin
|
||||
# It only works in first time to install harbor
|
||||
# Remember Change the admin password from UI after launching Harbor.
|
||||
harbor_admin_password: Harbor12345
|
||||
|
||||
# Harbor DB configuration
|
||||
database:
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: root123
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: 50
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 100 for postgres.
|
||||
max_open_conns: 100
|
||||
|
||||
# The default data volume
|
||||
data_volume: /data
|
||||
|
||||
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||
# Uncomment storage_service setting If you want to using external storage
|
||||
# storage_service:
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
# ca_bundle:
|
||||
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
|
||||
# filesystem:
|
||||
# maxthreads: 100
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
# redirect:
|
||||
# disabled: false
|
||||
|
||||
# Clair configuration
|
||||
clair:
|
||||
# The interval of clair updaters, the unit is hour, set to 0 to disable the updaters.
|
||||
updaters_interval: 12
|
||||
|
||||
jobservice:
|
||||
# Maximum number of job workers in job service
|
||||
max_job_workers: 10
|
||||
|
||||
notification:
|
||||
# Maximum retry count for webhook job
|
||||
webhook_job_max_retry: 10
|
||||
|
||||
chart:
|
||||
# Change the value of absolute_url to enabled can enable absolute url in chart
|
||||
absolute_url: disabled
|
||||
|
||||
# Log configurations
|
||||
log:
|
||||
# options are debug, info, warning, error, fatal
|
||||
level: info
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: 50
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: 200M
|
||||
# The directory on your host that store log
|
||||
location: /var/log/harbor
|
||||
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version: 1.10.0
|
||||
|
||||
# Uncomment external_database if using external database.
|
||||
# external_database:
|
||||
# harbor:
|
||||
# host: harbor_db_host
|
||||
# port: harbor_db_port
|
||||
# db_name: harbor_db_name
|
||||
# username: harbor_db_username
|
||||
# password: harbor_db_password
|
||||
# ssl_mode: disable
|
||||
# max_idle_conns: 2
|
||||
# max_open_conns: 0
|
||||
# clair:
|
||||
# host: clair_db_host
|
||||
# port: clair_db_port
|
||||
# db_name: clair_db_name
|
||||
# username: clair_db_username
|
||||
# password: clair_db_password
|
||||
# ssl_mode: disable
|
||||
# notary_signer:
|
||||
# host: notary_signer_db_host
|
||||
# port: notary_signer_db_port
|
||||
# db_name: notary_signer_db_name
|
||||
# username: notary_signer_db_username
|
||||
# password: notary_signer_db_password
|
||||
# ssl_mode: disable
|
||||
# notary_server:
|
||||
# host: notary_server_db_host
|
||||
# port: notary_server_db_port
|
||||
# db_name: notary_server_db_name
|
||||
# username: notary_server_db_username
|
||||
# password: notary_server_db_password
|
||||
# ssl_mode: disable
|
||||
|
||||
# Uncomment external_redis if using external Redis server
|
||||
# external_redis:
|
||||
# host: redis
|
||||
# port: 6379
|
||||
# password:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# registry_db_index: 1
|
||||
# jobservice_db_index: 2
|
||||
# chartmuseum_db_index: 3
|
||||
# clair_db_index: 4
|
||||
|
||||
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
|
||||
# uaa:
|
||||
# ca_file: /path/to/ca
|
||||
|
||||
# Global proxy
|
||||
# Config http proxy for components, e.g. http://my.proxy.com:3128
|
||||
# Components doesn't need to connect to each others via http proxy.
|
||||
# Remove component from `components` array if want disable proxy
|
||||
# for it. If you want use proxy for replication, MUST enable proxy
|
||||
# for core and jobservice, and set `http_proxy` and `https_proxy`.
|
||||
# Add domain to the `no_proxy` field, when you want disable proxy
|
||||
# for some special registry.
|
||||
proxy:
|
||||
http_proxy:
|
||||
https_proxy:
|
||||
# no_proxy endpoints will appended to 127.0.0.1,localhost,.local,.internal,log,db,redis,nginx,core,portal,postgresql,jobservice,registry,registryctl,clair,chartmuseum,notary-server
|
||||
no_proxy:
|
||||
components:
|
||||
- core
|
||||
- jobservice
|
||||
- clair
|
27
CloudNative/Kubernetes/Docs/access/k8s-key.pem
Normal file
|
@ -0,0 +1,27 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpAIBAAKCAQEAwQvjNnp2ijKSBBv1EAflZU+P9PX3EglxDkyeAcsl1V97n8sG
|
||||
D3/ALYQHi/vDb3snWWgk46yXkc1Lw8v9d0B6armQO6942LSUtyxZNGCkCe78s+i7
|
||||
VgNsTfVb9otIGuQmQWcJ9usr60YCwGHCvKwCKb0nFu3iVS4G68vIwL82nw1BDCsp
|
||||
OtPompBuBl2/gs/0wja6ydBlkPCSeRp7+vPofBsT+vsCUc7sCkjzkXtdY44gy9Gp
|
||||
XViyjj70TbRqlk23ucsOtFpZW0QJ70UCZPa6fo5moTDI7oS8OshmAsNB+OenmYRP
|
||||
rfSv3ZmbR6l1tdG3wC3Nbf/2c+QcdeUjpbRfAwIDAQABAoIBAFlNU32HJ0EE5dEq
|
||||
0bHpxS+Zn6GVCwd4AKm5vbpcrJLJSdoAxDv1QASOOCOEgsMWeHx+ldoQpEkZbDwW
|
||||
KJm65SZr0xIZOVEeMFGQHXACaMADozjip47BpJ9nqnQhuGhgNGY68TwdTELZLzqp
|
||||
vrki5RlD3EpaH0KCOUdpDORFUFJAtIeq6Z+fEqyeMuuXILNcr1uBgVOXxAhfQLJl
|
||||
BvWLxF/vkab16WryA2Ly7NNelS8n27N1t1pwiG1FpTs68FBnCzlSR35LGko4Fmea
|
||||
K5/eA5QSYvbsK/DCW4rY9fasc97fmpe0TPnOAEuxcUfcO7RJ4974xxGcwp8JTH8X
|
||||
upkVzhECgYEA52nDe0oZxRQbKt2iqqIY6hg99kRu7FUewz1tk6YBivo1cEMKsSJN
|
||||
E2B+rcVdFB+Q0IwPx9F4UGSaIurfwGpC8TA5eV9oN/Q37Tl5OZ3A/NRrQfi9JbwN
|
||||
hZ+s3PfsIX7/fzHN0ic0Lbwr/Hx1y/s0j+Hy6jdksx4X9RopJS51pysCgYEA1Y6V
|
||||
O6LqzuArWLXSri0qKZwoiZJmmjCad1u29iQ8KLNSJC7ElPe2noR27DprlDE+REod
|
||||
X+CbCqQiKHIjcZOl9L+3bEWQZuLpJfWE05hBWzBOVaGYDLTzL+hxYTTFL933LQv6
|
||||
GuGlAUjWzOEPK9b9UN2KY3NfKhJ01lEilYYlO4kCgYEAg9lCw5Aotp0KDZZSBB5v
|
||||
S5GcXCAJgyXMbTkmoaVH48NDhz7rbJk6iPvFsYRtz/VcMTZcF2FFx6hKGfLwoVTp
|
||||
5zmuxXSSkK7Y3pSMeUNknaj5o5DxU/qdSbD8cLnhFASj1fiuaDpmBFjeRvEyvJHY
|
||||
FNKL8N62xUc0JzZMkx6xchkCgYACSnBBXoMESxVL809aPPPJ1Hn+18ov9zuA8kPp
|
||||
KgRXL20/JB1Jt8qGsscPy6Shx1FyrBTalW80UwRmyKqmqj3KfU/7YlelMuPrW1Hc
|
||||
bzKsRr1gX8ZgxIQjTGwC7ez7kXb4TbuWFDuP4vci5XsjLkAzYNz+KhuoOOB1+BLV
|
||||
5cV6eQKBgQCXlYRvlmr+Wc/mrFViNSSysjcNy6FhD1Sv1Y2/gcmBPHdtWlZBZDt/
|
||||
iCpZn7bdz8ACRRwmBAtLYUtQunn+F2rj11QRoqCExaY+xfrX/TkyDZMc0oPrUtds
|
||||
4GWhZGbez7BrSQ3fxDl+oYhOAjClrWt2h3sMawuJKpj/npb/6rm+dw==
|
||||
-----END RSA PRIVATE KEY-----
|
39
CloudNative/Kubernetes/Docs/access/k8s.conf
Normal file
|
@ -0,0 +1,39 @@
|
|||
upstream k8s-ingress-80 {
|
||||
server 172.19.0.53:80 max_fails=3 fail_timeout=10s;
|
||||
}
|
||||
|
||||
upstream k8s-ingress-443 {
|
||||
server 172.19.0.53:443 max_fails=3 fail_timeout=10s;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name *.zeroc.net;
|
||||
|
||||
location / {
|
||||
proxy_pass http://k8s-ingress-80;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name *.zeroc.net;
|
||||
|
||||
ssl_certificate ssl/k8s.pem;
|
||||
ssl_certificate_key ssl/k8s-key.pem;
|
||||
ssl_session_cache shared:SSL:1m;
|
||||
ssl_session_timeout 10m;
|
||||
ssl_ciphers HIGH:!aNULL:!MD5;
|
||||
ssl_prefer_server_ciphers on;
|
||||
|
||||
location / {
|
||||
proxy_pass https://k8s-ingress-443;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Server $host;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
}
|
||||
}
|
23
CloudNative/Kubernetes/Docs/access/k8s.pem
Normal file
|
@ -0,0 +1,23 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIID7DCCAtSgAwIBAgIUHwoRJXf4lOMd+I/TXJFniIIdSeYwDQYJKoZIhvcNAQEL
|
||||
BQAwbzELMAkGA1UEBhMCQ04xEjAQBgNVBAgTCUd1YW5nZG9uZzERMA8GA1UEBxMI
|
||||
U2hlbnpoZW4xDjAMBgNVBAoTBVByb2JlMRQwEgYDVQQLEwtEZXZlbG9wbWVudDET
|
||||
MBEGA1UEAxMKa3ViZXJuZXRlczAeFw0yMTAxMDYwNTA3MDBaFw0zMTAxMDQwNTA3
|
||||
MDBaMHAxCzAJBgNVBAYTAkNOMRIwEAYDVQQIEwlHdWFuZ2RvbmcxETAPBgNVBAcT
|
||||
CFNoZW56aGVuMQ4wDAYDVQQKEwVQcm9iZTEUMBIGA1UECxMLRGV2ZWxvcG1lbnQx
|
||||
FDASBgNVBAMTCyouemVyb2MubmV0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAwQvjNnp2ijKSBBv1EAflZU+P9PX3EglxDkyeAcsl1V97n8sGD3/ALYQH
|
||||
i/vDb3snWWgk46yXkc1Lw8v9d0B6armQO6942LSUtyxZNGCkCe78s+i7VgNsTfVb
|
||||
9otIGuQmQWcJ9usr60YCwGHCvKwCKb0nFu3iVS4G68vIwL82nw1BDCspOtPompBu
|
||||
Bl2/gs/0wja6ydBlkPCSeRp7+vPofBsT+vsCUc7sCkjzkXtdY44gy9GpXViyjj70
|
||||
TbRqlk23ucsOtFpZW0QJ70UCZPa6fo5moTDI7oS8OshmAsNB+OenmYRPrfSv3Zmb
|
||||
R6l1tdG3wC3Nbf/2c+QcdeUjpbRfAwIDAQABo38wfTAOBgNVHQ8BAf8EBAMCBaAw
|
||||
HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYD
|
||||
VR0OBBYEFB+lGPXGnAUwWHQjRzs8iCgZE8LkMB8GA1UdIwQYMBaAFGeJYv/N7NJT
|
||||
/rsyZtv35zNFbb5DMA0GCSqGSIb3DQEBCwUAA4IBAQCsIpw3Yz6ymF1gjfDScZGv
|
||||
q0MNP+TezHs6qiA9ex93Za/VV1qf2Fgj1kyoQfQU0ysBKftTUa65LU9XyOXDOBAA
|
||||
h9McrKRBfKhX8zyfIfob2n5ygZt3x6atiNw5O72hI3lvfcmGiBMOAtSJipvc8fW0
|
||||
ibx5NM4UF4p5fq3mLNS5uP+oxA0V65X6t1SfoEHBYHbgjefoeLGG5y7AVapt9AMd
|
||||
qplVYop8RuwUeb5fctkDpY+Ib6lgunoJ7HXsRD5o8SYkHVChq4AF3o5zFBsLbV4i
|
||||
N8Ppa2whLblcogFrLipBjY1BzhOsIfqqK8dwmcJK7/gZDUh0pl0ZUMW+v2Gb0l9z
|
||||
-----END CERTIFICATE-----
|
44
CloudNative/Kubernetes/Docs/access/nginx.conf
Normal file
|
@ -0,0 +1,44 @@
|
|||
#user upload_00;
|
||||
worker_processes 1;
|
||||
|
||||
#error_log logs/error.log;
|
||||
#error_log logs/error.log notice;
|
||||
error_log logs/error.log info;
|
||||
|
||||
pid logs/nginx.pid;
|
||||
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
|
||||
http {
|
||||
include mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
|
||||
access_log logs/access.log main;
|
||||
|
||||
sendfile on;
|
||||
#tcp_nopush on;
|
||||
|
||||
#keepalive_timeout 0;
|
||||
keepalive_timeout 65;
|
||||
|
||||
gzip on;
|
||||
gzip_min_length 1k;
|
||||
gzip_buffers 4 16k;
|
||||
gzip_http_version 1.0;
|
||||
gzip_comp_level 2;
|
||||
gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript application/x-httpd-php image/jpeg image/gif image/png application/vnd.ms-fontobject font/ttf font/opentype font/x-woff image/svg+xml;
|
||||
gzip_vary on;
|
||||
gzip_disable "MSIE [1-6]\.";
|
||||
|
||||
|
||||
include vhost/*.conf;
|
||||
}
|
|
@ -0,0 +1,16 @@
|
|||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRoute
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
entryPoints:
|
||||
- websecure
|
||||
tls:
|
||||
secretName: dashboard-tls
|
||||
routes:
|
||||
- match: Host(`dashboard.zeroc.net`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: kubernetes-dashboard
|
||||
port: 443
|
297
CloudNative/Kubernetes/Docs/addons/dashboard/init/dashboard.yaml
Normal file
|
@ -0,0 +1,297 @@
|
|||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
name: kubernetes-dashboard-csrf
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
data:
|
||||
csrf: ""
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
name: kubernetes-dashboard-key-holder
|
||||
namespace: kubernetes-dashboard
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
name: kubernetes-dashboard-settings
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
|
||||
verbs: ["get", "update", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["kubernetes-dashboard-settings"]
|
||||
verbs: ["get", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
resourceNames: ["heapster", "dashboard-metrics-scraper"]
|
||||
verbs: ["proxy"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
name: kubernetes-dashboard
|
||||
rules:
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-dashboard
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: kubernetesui/dashboard:v2.0.1
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
- --namespace=kubernetes-dashboard
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
ports:
|
||||
- port: 8000
|
||||
targetPort: 8000
|
||||
selector:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
|
||||
---
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
name: dashboard-metrics-scraper
|
||||
namespace: kubernetes-dashboard
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dashboard-metrics-scraper
|
||||
spec:
|
||||
securityContext:
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- name: dashboard-metrics-scraper
|
||||
image: kubernetesui/metrics-scraper:v1.0.4
|
||||
ports:
|
||||
- containerPort: 8000
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTP
|
||||
path: /
|
||||
port: 8000
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1001
|
||||
runAsGroup: 2001
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
nodeSelector:
|
||||
"kubernetes.io/os": linux
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
20
CloudNative/Kubernetes/Docs/addons/dashboard/token/rbac.yaml
Normal file
|
@ -0,0 +1,20 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: admin-user
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: admin-user
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: admin-user
|
||||
namespace: kube-system
|
BIN
CloudNative/Kubernetes/Docs/addons/efk/elastic-certificates.p12
Normal file
BIN
CloudNative/Kubernetes/Docs/addons/efk/elastic-stack-ca.p12
Normal file
633
CloudNative/Kubernetes/Docs/addons/efk/official/operator.yaml
Normal file
|
@ -0,0 +1,633 @@
|
|||
# Source: eck-operator/templates/operator-namespace.yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: elastic-system
|
||||
labels:
|
||||
name: elastic-system
|
||||
---
|
||||
# Source: eck-operator/templates/service-account.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: elastic-operator
|
||||
namespace: elastic-system
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "1.8.0"
|
||||
---
|
||||
# Source: eck-operator/templates/webhook.yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: elastic-webhook-server-cert
|
||||
namespace: elastic-system
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "1.8.0"
|
||||
---
|
||||
# Source: eck-operator/templates/configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: elastic-operator
|
||||
namespace: elastic-system
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "1.8.0"
|
||||
data:
|
||||
eck.yaml: |-
|
||||
log-verbosity: 0
|
||||
metrics-port: 0
|
||||
container-registry: docker.elastic.co
|
||||
max-concurrent-reconciles: 3
|
||||
ca-cert-validity: 8760h
|
||||
ca-cert-rotate-before: 24h
|
||||
cert-validity: 8760h
|
||||
cert-rotate-before: 24h
|
||||
set-default-security-context: true
|
||||
kube-client-timeout: 60s
|
||||
elasticsearch-client-timeout: 180s
|
||||
disable-telemetry: false
|
||||
validate-storage-class: true
|
||||
enable-webhook: true
|
||||
webhook-name: elastic-webhook.k8s.elastic.co
|
||||
---
|
||||
# Source: eck-operator/templates/cluster-roles.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: elastic-operator
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "1.8.0"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- "authorization.k8s.io"
|
||||
resources:
|
||||
- subjectaccessreviews
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- endpoints
|
||||
- events
|
||||
- persistentvolumeclaims
|
||||
- secrets
|
||||
- services
|
||||
- configmaps
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
- statefulsets
|
||||
- daemonsets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- poddisruptionbudgets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- elasticsearch.k8s.elastic.co
|
||||
resources:
|
||||
- elasticsearches
|
||||
- elasticsearches/status
|
||||
- elasticsearches/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
- enterpriselicenses
|
||||
- enterpriselicenses/status
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- kibana.k8s.elastic.co
|
||||
resources:
|
||||
- kibanas
|
||||
- kibanas/status
|
||||
- kibanas/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apm.k8s.elastic.co
|
||||
resources:
|
||||
- apmservers
|
||||
- apmservers/status
|
||||
- apmservers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- enterprisesearch.k8s.elastic.co
|
||||
resources:
|
||||
- enterprisesearches
|
||||
- enterprisesearches/status
|
||||
- enterprisesearches/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- beat.k8s.elastic.co
|
||||
resources:
|
||||
- beats
|
||||
- beats/status
|
||||
- beats/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- agent.k8s.elastic.co
|
||||
resources:
|
||||
- agents
|
||||
- agents/status
|
||||
- agents/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- maps.k8s.elastic.co
|
||||
resources:
|
||||
- elasticmapsservers
|
||||
- elasticmapsservers/status
|
||||
- elasticmapsservers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- storage.k8s.io
|
||||
resources:
|
||||
- storageclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- admissionregistration.k8s.io
|
||||
resources:
|
||||
- mutatingwebhookconfigurations
|
||||
- validatingwebhookconfigurations
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
---
|
||||
# Source: eck-operator/templates/cluster-roles.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "elastic-operator-view"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-view: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "1.8.0"
|
||||
rules:
|
||||
- apiGroups: ["elasticsearch.k8s.elastic.co"]
|
||||
resources: ["elasticsearches"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["apm.k8s.elastic.co"]
|
||||
resources: ["apmservers"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["kibana.k8s.elastic.co"]
|
||||
resources: ["kibanas"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["enterprisesearch.k8s.elastic.co"]
|
||||
resources: ["enterprisesearches"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["beat.k8s.elastic.co"]
|
||||
resources: ["beats"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["agent.k8s.elastic.co"]
|
||||
resources: ["agents"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["maps.k8s.elastic.co"]
|
||||
resources: ["elasticmapsservers"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
# Source: eck-operator/templates/cluster-roles.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "elastic-operator-edit"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "1.8.0"
|
||||
rules:
|
||||
- apiGroups: ["elasticsearch.k8s.elastic.co"]
|
||||
resources: ["elasticsearches"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["apm.k8s.elastic.co"]
|
||||
resources: ["apmservers"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["kibana.k8s.elastic.co"]
|
||||
resources: ["kibanas"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["enterprisesearch.k8s.elastic.co"]
|
||||
resources: ["enterprisesearches"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["beat.k8s.elastic.co"]
|
||||
resources: ["beats"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["agent.k8s.elastic.co"]
|
||||
resources: ["agents"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["maps.k8s.elastic.co"]
|
||||
resources: ["elasticmapsservers"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
---
|
||||
# Source: eck-operator/templates/role-bindings.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: elastic-operator
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "1.8.0"
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: elastic-operator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: elastic-operator
|
||||
namespace: elastic-system
|
||||
---
|
||||
# Source: eck-operator/templates/webhook.yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "1.8.0"
|
||||
spec:
|
||||
ports:
|
||||
- name: https
|
||||
port: 443
|
||||
targetPort: 9443
|
||||
selector:
|
||||
control-plane: elastic-operator
|
||||
---
|
||||
# Source: eck-operator/templates/statefulset.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: elastic-operator
|
||||
namespace: elastic-system
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "1.8.0"
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
control-plane: elastic-operator
|
||||
serviceName: elastic-operator
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
# Rename the fields "error" to "error.message" and "source" to "event.source"
|
||||
# This is to avoid a conflict with the ECS "error" and "source" documents.
|
||||
"co.elastic.logs/raw": "[{\"type\":\"container\",\"json.keys_under_root\":true,\"paths\":[\"/var/log/containers/*${data.kubernetes.container.id}.log\"],\"processors\":[{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"error\",\"to\":\"_error\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_error\",\"to\":\"error.message\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"source\",\"to\":\"_source\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_source\",\"to\":\"event.source\"}]}}]}]"
|
||||
"checksum/config": 032e84bdd1e85533291d73835756b3ef2b86d606c6281a446ad3703106703562
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
serviceAccountName: elastic-operator
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
containers:
|
||||
- image: "docker.elastic.co/eck/eck-operator:1.8.0"
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: manager
|
||||
args:
|
||||
- "manager"
|
||||
- "--config=/conf/eck.yaml"
|
||||
- "--distribution-channel=all-in-one"
|
||||
env:
|
||||
- name: OPERATOR_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: WEBHOOK_SECRET
|
||||
value: elastic-webhook-server-cert
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
ports:
|
||||
- containerPort: 9443
|
||||
name: https-webhook
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: "/conf"
|
||||
name: conf
|
||||
readOnly: true
|
||||
- mountPath: /tmp/k8s-webhook-server/serving-certs
|
||||
name: cert
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: conf
|
||||
configMap:
|
||||
name: elastic-operator
|
||||
- name: cert
|
||||
secret:
|
||||
defaultMode: 420
|
||||
secretName: elastic-webhook-server-cert
|
||||
---
|
||||
# Source: eck-operator/templates/webhook.yaml
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
name: elastic-webhook.k8s.elastic.co
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "1.8.0"
|
||||
webhooks:
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-agent-k8s-elastic-co-v1alpha1-agent
|
||||
failurePolicy: Ignore
|
||||
name: elastic-agent-validation-v1alpha1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1beta1]
|
||||
sideEffects: "None"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- agent.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- agents
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-apm-k8s-elastic-co-v1-apmserver
|
||||
failurePolicy: Ignore
|
||||
name: elastic-apm-validation-v1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1beta1]
|
||||
sideEffects: "None"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apm.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- apmservers
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-apm-k8s-elastic-co-v1beta1-apmserver
|
||||
failurePolicy: Ignore
|
||||
name: elastic-apm-validation-v1beta1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1beta1]
|
||||
sideEffects: "None"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apm.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- apmservers
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-beat-k8s-elastic-co-v1beta1-beat
|
||||
failurePolicy: Ignore
|
||||
name: elastic-beat-validation-v1beta1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1beta1]
|
||||
sideEffects: "None"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- beat.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- beats
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-enterprisesearch-k8s-elastic-co-v1-enterprisesearch
|
||||
failurePolicy: Ignore
|
||||
name: elastic-ent-validation-v1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1beta1]
|
||||
sideEffects: "None"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- enterprisesearch.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- enterprisesearches
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-enterprisesearch-k8s-elastic-co-v1beta1-enterprisesearch
|
||||
failurePolicy: Ignore
|
||||
name: elastic-ent-validation-v1beta1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1beta1]
|
||||
sideEffects: "None"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- enterprisesearch.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- enterprisesearches
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-elasticsearch-k8s-elastic-co-v1-elasticsearch
|
||||
failurePolicy: Ignore
|
||||
name: elastic-es-validation-v1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1beta1]
|
||||
sideEffects: "None"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- elasticsearch.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- elasticsearches
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-elasticsearch-k8s-elastic-co-v1beta1-elasticsearch
|
||||
failurePolicy: Ignore
|
||||
name: elastic-es-validation-v1beta1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1beta1]
|
||||
sideEffects: "None"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- elasticsearch.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- elasticsearches
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-kibana-k8s-elastic-co-v1-kibana
|
||||
failurePolicy: Ignore
|
||||
name: elastic-kb-validation-v1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1beta1]
|
||||
sideEffects: "None"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kibana.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- kibanas
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-kibana-k8s-elastic-co-v1beta1-kibana
|
||||
failurePolicy: Ignore
|
||||
name: elastic-kb-validation-v1beta1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1beta1]
|
||||
sideEffects: "None"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kibana.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- kibanas
|
||||
|
150
CloudNative/Kubernetes/Docs/addons/efk/v1/es.yaml
Normal file
|
@ -0,0 +1,150 @@
|
|||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: elasticsearch
|
||||
namespace: infra
|
||||
labels:
|
||||
app: elasticsearch
|
||||
spec:
|
||||
selector:
|
||||
app: elasticsearch
|
||||
clusterIP: None
|
||||
ports:
|
||||
- port: 9200
|
||||
name: db
|
||||
- port: 9300
|
||||
name: transport
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: elasticsearch
|
||||
namespace: infra
|
||||
data:
|
||||
elasticsearch.yml: |
|
||||
cluster.name: "elasticsearch"
|
||||
node.name: "${POD_NAME}"
|
||||
cluster.initial_master_nodes: "elasticsearch-0,elasticsearch-1,elasticsearch-2"
|
||||
#discovery.zen.minimum_master_nodes: 2
|
||||
#gateway.auto_import_dangling_indices: true
|
||||
discovery.seed_hosts: "elasticsearch"
|
||||
network.host: 0.0.0.0
|
||||
http.cors.enabled: true
|
||||
http.cors.allow-origin: "*"
|
||||
http.cors.allow-headers: "Authorization,X-Requested-With,Content-Length,Content-Type"
|
||||
xpack.security.enabled: true
|
||||
xpack.security.transport.ssl.enabled: true
|
||||
xpack.security.transport.ssl.verification_mode: certificate
|
||||
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
|
||||
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: elastic-certificates
|
||||
namespace: infra
|
||||
binaryData:
|
||||
elastic-certificates.p12: MIINbwIBAzCCDSgGCSqGSIb3DQEHAaCCDRkEgg0VMIINETCCBW0GCSqGSIb3DQEHAaCCBV4EggVaMIIFVjCCBVIGCyqGSIb3DQEMCgECoIIE+zCCBPcwKQYKKoZIhvcNAQwBAzAbBBQh3bOIoRgcJBEpfFRYPCSWGTKuowIDAMNQBIIEyI0xYFH61MsqoUbVtkoByYLM/GDkzSTs+M0pQXHih0/m0prYX6AH9H/y8J4te0/eVGJPAHid/SwvMpXCsC4B0YedHEBuFpJaFszAwLyudNDW3kk4jXJCQDaKP8CZhND3jZI74aH8iFqruGhh52orW2R7AH24Q6oJnzzKuGUjTbYaqOMW3aqjk891shPZAdEZ6UbH5I/qY+UFzJfqwdZ2C3vLKKOc5qR5rLxi9IbRWOKISpK6hVwz//8J/PYqhqTW4hipZebgcRbQgyg6rvlucjHcUSTPwcrE6bWlI0EB1FRc1k/oB2+GLCD5PbnVurlc7Vex6D/leODIlxsm6rLIXMRWQrUyaY6qTXQMT5t9RWarsrR+LYuQawTPY2O4TIYElLFAMWDcvAbZR5o/oJ3tDM/Tr/M1G3IdlWo5+HirQ99xBSlCsn80Dyxb+ty+3tALQ1ZQ7TS+f6EtHfRHyuywnmPgnP2eQZVwJOLgkf6s3gS4miiDabmrWke+gCOQga5z1YIuQZJzS60GYmyK5iTy7dxz85Ws+jXBInRn5lC8FbYC9NaIGKheCZsn06ELPNuHddsBF5ms1rClr+YqUvYksu3iedKs7CCv9z1I15QOk/XQWXx0Xhnftakqtz4XBGKLFn5zRhkK2aZZK2lmZFpnt+3HairqUoG8OlaX5VWav05ivqhQZHbtgjwiEGG9HW3LWwpixnViA21T3dpQo6qzbKaR/QUZUvwcZVfF0AzqcE2wQemunYDmXsx6wrTRuqqW9BY20J+JFQB0GMoTXdX6BXO4uniWPnAzRzx8lWtiADXN9Nn40WNxLO9pMvEwAQ8uzuS7xLdcG0yYqfcbhTvLR7IYiFVzMKYwL/E9IofUDjBSdvUBbJomKSnQYXvFxQehABQMlCsVcjaRfz/rITiJKLCEo38a0MB0cRP6GN23j2qHBAQqURa5ZYevfS3HNQin0D6gI4MQbw9AgrGVtH62thCwBcjT6uwRxWu4bpio3XIYk7W5dVhySRMe+Zva3Xm9MVXN3ENtMojSXR9zURLvm00bQe3CxruIZ5pBn34tFpfUZpzJiS4pQK1opcr3DnKuQKKQJ6OzEq3ajTdM5OKS2RT5tUdvYQ0wvPeGA1o8zqgdliMOK94Hz3vPkuxmTwJMUhrXwbvQGxTpZkCPTuWVQk5EV9efgbgkuWqKEbO5LJeWJdyZyDq1FAJS7+HMKSWcHtXvDWWP3qd721J5H/UXcoeclDDj64JYxND5qFMKDZrozizaMjPCK0qHc+G1p7R5ywpOKQ2tCLYMklvwV8megbKLb78KL7alOo84Eanl13SvNqMr8F42j/UR3oQ78TWI0DUTw4QsfWW9LJiGZhwB3SgUB2jTbmel0673OlcE739HEQ1JAGand4DpdxOrgM7M6NkJchfKuU7jgcK0Lyhim+bfZq9szaNO0350S9mbtmvK+ZMLGUIWwRB0x1WlECJyySf8ljPQKe/hJryrrfNwbgM/mE+9AiKdum+jvB3D+njFB1b8yo1sQ1f2hmSr33gRBH5kA8sF+aFfSs7S1ncG0dEGQOhiSnW+Qdq4tKO8syxdINYJXshwo5VsAaoPd89WNXjHj7H48DVT+/sjfxw3DYGW2YSTqLWLTDFEMB8GCSqGSIb3DQEJFDESHhAAaQBuAHMAdABhAG4AYwBlMCEGCSqGSIb3DQEJFTEUBBJUaW1lIDE2MjI2OTE4MTg4NjkwggecBgkqhkiG9w0BBwagggeNMIIHiQIBADCCB4IGCSqGSIb3DQEHATApBgoqhkiG9w0BDAEGMBsEFPrwGj8DM39WaDEUYeYZOBdORDLfAgMAw1CAggdIcNTxiLvbMk1P5NhcU9xOGNmYkU69laxNac6J/AXV3XFR/IrZZ74mB2ZaQH+rBMqPHfrYstpRdEMnCHmBzR9ff/ehlu5pfr44GYdt1TUDODM/ShzQM3iWYfW5sAI0ArFz7izkoTEFBN1R4v5vGLHwwXP8ZTczhw0RrxaxpYj+8rik5xHZPgwfMFsO+FncQLGfrYD4/Jx0uHeOdjKQ3xmktkwlB++MHcWwtqa+b8qy6+ztNgLbTUSKiXZcqdBFuDb8rfRTYoJF2hvKsHL3hDpY5Kym8HSeUiwgMiWkEv4WF+wjPZ8ERx8W2sTPApGJKIod9Rr4qCgotPetdGPgeiYeuKBJCglIwmCqgRnE8klMxlvWzaAx7GEGum+92Qm+owIgKzUy3Yd3P0P6g0tqH/XDT2duwpherJ/Fxggy+FFQwnXIqrqMwmix1uk4F1TjjGbe6fMqLtPXEWzLUUsJl1wYy0CvFy8DCbqx8v3B9M63z+Yp19wck04MrwAWtOMAVq2IV4wWWKAqrkOSqplBTHPi9w6jLRMVFBJgCu8BrTZVXkREv0bh97uY11efD++w33cKvZORJlSVJH2ekJ1jrh4lBdeGyqVTjEJBYW7tQ0INg/Sf35sf8fTuhgISmXS8VfrONu77ICYqu86KKZocfQciBxQLEmmEhYxIXCMEYswSu6lnMCzD9l9DIXx8dPNImhBd8QgzwFb+80mGs5Y7+0u4WSniYJy38SyxtOSGpHRP5+qTGD9psssHohM5lhuLMczZqlesy0XSsT4S2FNjaf6s7D91ET0n8tTwdwpjFRl8CKRABS6KEs1R48l3bwavUcF5xQ+sjZ96nBvDlbq5SdRn5VmZ4hnLuV3GbZX3Yip/SJUUMkYmEMMyk6q5X0dWfbVC7/INdNc/Lh5ooCGdelH2lqxU+wCEWhMfwz/phAilxCIgPWdtChe2QBKRM5b5F/Eb14i6xJVGYDzs1j1vARDi9Mx2Buc+VTqKwQYvnIZHdkqGVjFCC40Iq3UJ43D8CB7v98huYdj/DYzKCVWFOLJm77qoT3A9iT00uPtgtjIk/RG6NkzB9YCuiJ6DR5cHa7Udklii7UUviQPtUAbYvF4IPAtZcCSjs8VeX9LEPo6D6hDup4T1fakpAc5vpbSosTbAPTu97yBdo0XGyBwPRFd0mAQ6AdQiiKNy5l/M0E6U9SNv687opse6xJqTpfB4US+wgh/N4LOK9o3Z9wwS2f3FpeLpoFJBwa6rW0O5GXzx7L90kU9OZzU6XlIJXqEUItihMrQVYy2GaJQ1y/WJv7AZVmVM+yLaGfQ/Jg08LRQuV6b1DGVgH9TdPxihNPqAOcfWoCejaF+I05gadBFqwyqdxZtVm9I5wmNxRu1pYRoKY6hT12l64ZSAFeM5/yXz47+Jsy7FvGKlz6XM8uAHHIcYLJUESUW6Yz89FH0/YbNpkMCrE0bYjy1S1p/FXJxTlQW3YHaVdT6r0kH5XkGHqYVKKzqQ81Kflm+TsNfo9
TbbF/E6PbKGV/9S0nITsGBu76PBRdcEKxOg/x/fxdIXtm6n0/88Xvcz/Uzz7J7LKro9nUQFB2FUtaejwtn+5CFdigZq9WY2IZPEK6hV/RGtuDVuX7xlAug/o5IyB8mbBiPMV0jKBy1A/2YE4p5NdElsx6vd3goOIHOztV6ZTaNnemmvV5u30ZuLafsRxKlX+tJDblDwWm9lmhATfrkJuoLoPw04g1OS9ayNdgifWeh5IqS2SOZ1R7pSqyq75aYgxMRM5GsMDKlLwfRnF1dCgwJA0Wk24P4ba1b+jf0GRAa7OF1LkZmzktUIEDMIMsgg1wlejYAGV4oec/n1HW7nSvr+yQAkxuiBYWUwvm+0MntSdNvaR1zMnEwNsTBi3/4VQT+ujtzEfSIFv6B4Lxz/Tod1JrwatZofq+Rspw0W6uDgWODhY0IMoFU8Qdd5BkatgoipCAFTWOWrwxLZ/UG671u1VZgLN6Pno4XQkMAnDwn/bzzf8OthiKlxhkK8onH2hucxl3wveAC5CiBFpO2ZOZet/ZVa66AzOY5YD7uNLcFZ2B9CPaGp6BEDrewRvzJmVWpxUjRNgYQ5RkAzk9iwMxYxZFRt1UrFTSnoIlGtlAvBxuBgqH0HsV+B3ikK/qp4nCx/Qkr/Uyv/BPYjk5Z8LVaOkpOEYlY9iYYeJfcBUsJNOKrX1Zv+mrJsFg04uI/660XYVQCA+tbs2siyGiIG6+4nqp3NTmfAkgBWacKTzrqJX5Tz3vTB/5g+uNiU4MH7vK7B1WDaMaqsfZby9vL7dFskky+JL5foBI2WpNwT1YRLMplZ8uNWahuW5+7TK+aMsYpsRPpYuNC5xaPfz1cPq5HL7zhrEN/FKk0zvK7wl5db+usyoR7QjmqjIGsiOZ8kHH/OTFbgzjIJd9PUuiC6p8TtKwF9ZXwMiUqOjbzCnmzoN5NuLxzP+yy7rheOLtj7cCYBkCmjm98x4oqCNjA+MCEwCQYFKw4DAhoFAAQU82fJw1XA6rDoMF+w0V0VFVpU/nUEFB3lj4GTt6Bs7R2w5r+0GFdvXboKAgMBhqA=
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: elasticsearch
|
||||
namespace: infra
|
||||
spec:
|
||||
serviceName: elasticsearch
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: elasticsearch
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: elasticsearch
|
||||
spec:
|
||||
containers:
|
||||
- name: elasticsearch
|
||||
image: 172.16.0.113:9080/infra/elasticsearch:7.9.0
|
||||
lifecycle:
|
||||
postStart:
|
||||
exec:
|
||||
command: ["/bin/bash", "-c", "sysctl -w vm.max_map_count=262144; ulimit -l unlimited"]
|
||||
ports:
|
||||
- name: db
|
||||
containerPort: 9200
|
||||
- name: transport
|
||||
containerPort: 9300
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1000m
|
||||
requests:
|
||||
cpu: 1000m
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /usr/share/elasticsearch/data
|
||||
- name: plugins
|
||||
mountPath: /usr/share/elasticsearch/plugins
|
||||
- name: config
|
||||
mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
|
||||
subPath: elasticsearch.yml
|
||||
- name: cert
|
||||
mountPath: /usr/share/elasticsearch/config/elastic-certificates.p12
|
||||
subPath: elastic-certificates.p12
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: ES_JAVA_OPTS
|
||||
value: "-Xms512m -Xmx512m"
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: elasticsearch
|
||||
- name: cert
|
||||
configMap:
|
||||
name: elastic-certificates
|
||||
imagePullSecrets:
|
||||
- name: harbor
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
labels:
|
||||
app: elasticsearch
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: managed-nfs-storage
|
||||
resources:
|
||||
requests:
|
||||
storage: 5Gi
|
||||
- metadata:
|
||||
name: plugins
|
||||
labels:
|
||||
app: elasticsearch
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: managed-nfs-storage
|
||||
resources:
|
||||
requests:
|
||||
storage: 50Mi
|
||||
|
||||
---
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRoute
|
||||
metadata:
|
||||
name: elasticsearch-ing
|
||||
namespace: infra
|
||||
|
||||
spec:
|
||||
entryPoints:
|
||||
- web
|
||||
routes:
|
||||
- match: Host(`es.tst.qianqianshijie.com`)
|
||||
kind: Rule
|
||||
services:
|
||||
- name: elasticsearch
|
||||
port: 9200
|
92
CloudNative/Kubernetes/Docs/addons/efk/v1/fluentd.yaml
Normal file
|
@ -0,0 +1,92 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: fluentd
|
||||
namespace: infra
|
||||
labels:
|
||||
app: fluentd
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: fluentd
|
||||
labels:
|
||||
app: fluentd
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: fluentd
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: fluentd
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: fluentd
|
||||
namespace: infra
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: fluentd
|
||||
namespace: infra
|
||||
labels:
|
||||
app: fluentd
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: fluentd
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: fluentd
|
||||
spec:
|
||||
serviceAccount: fluentd
|
||||
serviceAccountName: fluentd
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
containers:
|
||||
- name: fluentd
|
||||
image: fluent/fluentd-kubernetes-daemonset:v1.4.2-debian-elasticsearch-1.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
env:
|
||||
- name: FLUENT_ELASTICSEARCH_HOST
|
||||
value: "elasticsearch.infra.svc.cluster.local"
|
||||
- name: FLUENT_ELASTICSEARCH_PORT
|
||||
value: "9200"
|
||||
- name: FLUENT_ELASTICSEARCH_SCHEME
|
||||
value: "http"
|
||||
- name: FLUENTD_SYSTEMD_CONF
|
||||
value: disable
|
||||
resources:
|
||||
limits:
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 200Mi
|
||||
volumeMounts:
|
||||
- name: varlog
|
||||
mountPath: /var/log
|
||||
- name: varlibdockercontainers
|
||||
mountPath: /var/lib/docker/containers
|
||||
readOnly: true
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: varlog
|
||||
hostPath:
|
||||
path: /var/log
|
||||
- name: varlibdockercontainers
|
||||
hostPath:
|
||||
path: /var/lib/docker/containers
|
90
CloudNative/Kubernetes/Docs/addons/efk/v1/kibana.yaml
Normal file
@ -0,0 +1,90 @@
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: infra
  labels:
    k8s-app: kibana
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Kibana"
spec:
  ports:
  - port: 5601
    protocol: TCP
    targetPort: ui
  selector:
    k8s-app: kibana
---

apiVersion: v1
kind: ConfigMap
metadata:
  name: kibana
  namespace: infra
data:
  kibana.yml: |
    server.port: 5601
    server.host: "0"
    kibana.index: ".kibana"
    xpack.security.encryptionKey: "something_at_least_32_characters"
    elasticsearch.hosts: ["http://elasticsearch:9200"]
    elasticsearch.username: kibana_system
    #elasticsearch.password: "JD#eVmvT%AHmD*jxkSFq"
    elasticsearch.password: "htUERsH3R5BoQfWITO3h"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: infra
  labels:
    k8s-app: kibana
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kibana
  template:
    metadata:
      labels:
        k8s-app: kibana
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
      - name: kibana
        image: 172.16.0.113:9080/infra/kibana:7.9.0
        resources:
          # need more cpu upon initialization, therefore burstable class
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 5601
          name: ui
          protocol: TCP
        volumeMounts:
        - name: config
          mountPath: /usr/share/kibana/config
        #livenessProbe:
        #  httpGet:
        #    path: /api/status
        #    port: ui
        #  initialDelaySeconds: 5
        #  timeoutSeconds: 10
        #readinessProbe:
        #  httpGet:
        #    path: /api/status
        #    port: ui
        #  initialDelaySeconds: 5
        #  timeoutSeconds: 10
      volumes:
      - name: config
        configMap:
          name: kibana
      imagePullSecrets:
      - name: harbor
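This Deployment pulls Kibana 7.9.0 from the private Harbor registry (hence the harbor imagePullSecret) and mounts its entire config from the ConfigMap above, including a clear-text elasticsearch.password. A rough smoke test, assuming a local port-forward is acceptable:

```bash
kubectl apply -f CloudNative/Kubernetes/Docs/addons/efk/v1/kibana.yaml
kubectl -n infra rollout status deployment/kibana

# No Ingress is defined in this manifest, so reach the UI through a port-forward.
kubectl -n infra port-forward svc/kibana 5601:5601 &
curl -s http://127.0.0.1:5601/api/status
```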
99
CloudNative/Kubernetes/Docs/addons/efk/v2/es.yaml
Normal file
@ -0,0 +1,99 @@
kind: Service
apiVersion: v1
metadata:
  name: elasticsearch
  namespace: kube-logging
  labels:
    app: elasticsearch
spec:
  selector:
    app: elasticsearch
  clusterIP: None
  ports:
  - port: 9200
    name: rest
  - port: 9300
    name: inter-node

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster
  namespace: kube-logging
spec:
  serviceName: elasticsearch
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      containers:
      - name: elasticsearch
        image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 9200
          name: rest
          protocol: TCP
        - containerPort: 9300
          name: inter-node
          protocol: TCP
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        env:
        - name: cluster.name
          value: k8s-logs
        - name: node.name
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: discovery.seed_hosts
          value: "es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch"
        - name: cluster.initial_master_nodes
          value: "es-cluster-0,es-cluster-1,es-cluster-2"
        - name: ES_JAVA_OPTS
          value: "-Xms512m -Xmx512m"
      initContainers:
      - name: fix-permissions
        image: busybox
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
      - name: increase-vm-max-map
        image: busybox
        imagePullPolicy: IfNotPresent
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
      - name: increase-fd-ulimit
        image: busybox
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c", "ulimit -n 65536"]
        securityContext:
          privileged: true
  volumeClaimTemplates:
  - metadata:
      name: data
      labels:
        app: elasticsearch
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: do-block-storage
      resources:
        requests:
          storage: 10Gi
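The headless Service plus StatefulSet above form a three-node Elasticsearch 7.2.0 cluster, with init containers fixing data-directory ownership, vm.max_map_count, and the open-file limit. A rough verification, assuming the kube-logging namespace already exists (it is not created by these manifests):

```bash
kubectl apply -f CloudNative/Kubernetes/Docs/addons/efk/v2/es.yaml
kubectl -n kube-logging rollout status statefulset/es-cluster

# Check that the three pods joined a single cluster.
kubectl -n kube-logging port-forward es-cluster-0 9200:9200 &
curl -s 'http://127.0.0.1:9200/_cluster/health?pretty'
curl -s 'http://127.0.0.1:9200/_cat/nodes?v'
```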
92
CloudNative/Kubernetes/Docs/addons/efk/v2/fluentd.yaml
Normal file
@ -0,0 +1,92 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
  namespace: kube-logging
  labels:
    app: fluentd
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fluentd
  labels:
    app: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: kube-logging
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  namespace: kube-logging
  labels:
    app: fluentd
spec:
  selector:
    matchLabels:
      app: fluentd
  template:
    metadata:
      labels:
        app: fluentd
    spec:
      serviceAccount: fluentd
      serviceAccountName: fluentd
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:v1.4.2-debian-elasticsearch-1.1
        imagePullPolicy: IfNotPresent
        env:
        - name: FLUENT_ELASTICSEARCH_HOST
          value: "elasticsearch.kube-logging.svc.cluster.local"
        - name: FLUENT_ELASTICSEARCH_PORT
          value: "9200"
        - name: FLUENT_ELASTICSEARCH_SCHEME
          value: "http"
        - name: FLUENTD_SYSTEMD_CONF
          value: disable
        resources:
          limits:
            memory: 512Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
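This v2 collector is the same fluentd DaemonSet as in v1, retargeted at the kube-logging namespace. One rough way to confirm logs actually reach Elasticsearch, reusing the port-forward from the previous sketch:

```bash
kubectl apply -f CloudNative/Kubernetes/Docs/addons/efk/v2/fluentd.yaml
kubectl -n kube-logging get pods -l app=fluentd

# The collectors should report a successful connection and start creating indices.
kubectl -n kube-logging logs daemonset/fluentd --tail=50
curl -s 'http://127.0.0.1:9200/_cat/indices?v'   # via the port-forward from the previous sketch
```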
44
CloudNative/Kubernetes/Docs/addons/efk/v2/kibana.yaml
Normal file
@ -0,0 +1,44 @@
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: kube-logging
  labels:
    app: kibana
spec:
  ports:
  - port: 5601
  selector:
    app: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: kube-logging
  labels:
    app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
      - name: kibana
        image: docker.elastic.co/kibana/kibana:7.2.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
        - name: ELASTICSEARCH_URL
          value: http://elasticsearch:9200
        ports:
        - containerPort: 5601
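Kibana here is exposed only as a ClusterIP Service on port 5601, so a port-forward is the simplest way to reach the UI. A sketch, assuming the v2 manifests were applied as-is:

```bash
kubectl apply -f CloudNative/Kubernetes/Docs/addons/efk/v2/kibana.yaml
kubectl -n kube-logging rollout status deployment/kibana

# ClusterIP-only Service: use a port-forward for a quick look.
kubectl -n kube-logging port-forward svc/kibana 5601:5601
# Then open http://127.0.0.1:5601 and create an index pattern such as logstash-*.
```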
5
CloudNative/Kubernetes/Docs/addons/efk/v3/class.yaml
Normal file
@ -0,0 +1,5 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: do-block-storage
provisioner: example.com/nfs
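This StorageClass is the one the es-cluster volumeClaimTemplates reference as do-block-storage, but its provisioner, example.com/nfs, is an external provisioner name: unless a matching NFS provisioner is running in the cluster, the Elasticsearch PVCs will stay Pending. A quick check, assuming the file paths from this commit:

```bash
kubectl apply -f CloudNative/Kubernetes/Docs/addons/efk/v3/class.yaml
kubectl get storageclass do-block-storage

# Without a running provisioner for example.com/nfs, these claims remain Pending.
kubectl -n kube-logging get pvc -l app=elasticsearch
```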