mongodb-访问控制

想要基于角色的访问控制和数据库账号权限访问,那么我们先做好一个没有权限的集群,在此基础上一点点改

集群内置角色

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
wxqset:PRIMARY> use admin
switched to db admin
wxqset:PRIMARY> show roles;
{
"role" : "__queryableBackup",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "__system",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "backup",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "clusterAdmin",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "clusterManager",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "clusterMonitor",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "dbAdmin",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "dbAdminAnyDatabase",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "dbOwner",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "enableSharding",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "hostManager",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "read",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "readAnyDatabase",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "readWrite",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "readWriteAnyDatabase",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "restore",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "root",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "userAdmin",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}
{
"role" : "userAdminAnyDatabase",
"db" : "admin",
"isBuiltin" : true,
"roles" : [ ],
"inheritedRoles" : [ ]
}

创建最高权限

1
db.createUser({user: "root",pwd: "123.com",roles: [{role: "root",db: "admin"}]})

为单个库设置用户读写权限

1
db.createUser({user: "user01",pwd: "123456",roles: [{role: "readWrite",db: "db01"}]})

其它权限示例

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
wxqset:PRIMARY> db.createUser({user: "root",pwd: "123456",roles: [{role: "userAdminAnyDatabase",db: "admin"}]})
Successfully added user: {
"user" : "root",
"roles" : [
{
"role" : "userAdminAnyDatabase",
"db" : "admin"
}
]
}

wxqset:PRIMARY> db.createUser({user: "admin",pwd: "123.com",roles: [{role: "clusterAdmin",db: "admin"}]})
Successfully added user: {
"user" : "admin",
"roles" : [
{
"role" : "clusterAdmin",
"db" : "admin"
}
]
}

数据库用户查询

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
xxx:PRIMARY> use admin
xxx:PRIMARY> db.auth("username","pwd")
xxx:PRIMARY> use db01
xxx:PRIMARY> db.auth("username","pwd")
xxx:PRIMARY> show users;
{
"_id" : "admin.admin",
"user" : "admin",
"db" : "admin",
"roles" : [
{
"role" : "clusterAdmin",
"db" : "admin"
},
{
"role" : "userAdminAnyDatabase",
"db" : "admin"
}
]
}
{
"_id" : "admin.root",
"user" : "root",
"db" : "admin",
"roles" : [
{
"role" : "readWrite",
"db" : "local"
}
]
}

xxx:PRIMARY> db.system.users.find().pretty()

开启集群认证需要keyFile,所以创建keyFile文件,集群节点保持文件内容一致

1
2
3
openssl rand -base64 756 > /etc/mongod.keys
chmod 400 /etc/mongod.keys
chown mongod:mongod /etc/mongod.keys

集群所有节点修改 /etc/mongod.conf(可用 cat /etc/mongod.conf 确认)

1
2
3
4
security:
  authorization: enabled
  keyFile: /etc/mongod.keys
  clusterAuthMode: keyFile

修改好配置后重启即可

1
# systemctl  restart mongod

总结

1 keyFile属主属组及权限是注意点
2 权限这块理解好各个角色的作用域

mongodb-01集群

在mongodb集群中,只有主节点可以读写,其它节点可以提升为主节点后才能读写,从节点只能读
安装mongo数据库

1
2
3
4
5
6
7
8
[root@node03 ~]# cat  /etc/yum.repos.d/mongo.repo 
[mongodb-org-4.4]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/4.4/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-4.4.asc
yum install -y mongodb-org

修改配置文件参与集群配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
[root@node03 ~]# cat /etc/mongod.conf 
# mongod.conf

# for documentation of all options, see:
# http://docs.mongodb.org/manual/reference/configuration-options/

# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log

# Where and how to store data.
storage:
  dbPath: /var/lib/mongo
  journal:
    enabled: true
#  engine:
#  wiredTiger:

# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

# network interfaces
net:
  port: 27017
  bindIp: 0.0.0.0  # Enter 0.0.0.0,:: to bind to all IPv4 and IPv6 addresses or, alternatively, use the net.bindIpAll setting.


#security:

#operationProfiling:

replication: # 开启复制集
  replSetName: wxqset
#sharding:

## Enterprise-Only Options

#auditLog:

#snmp:

复制集集群基本步骤

1
2
3
4
5
6
7
8
9
10
11
1 集群所有节点修改配置文件replSetName: testSet 并启动mongod
2 在需要作为主节点的机器上 rs.initiate() 初始化配置
3 在主节点上添加从节点 rs.add("192.168.1.10:27017")
4 在从节点机器设置 rs.secondaryOk(), 用一些命令测试验证集群正常工作rs.status() show dbs;
5 在从节点机器上添加数据测试验证同步情况
use db01;
for (i=1;i<=10000;i++) db.students.insert({name: "student"+i,age:(i%120),address:"BJ"})
6 从节点进行查询验证
use db01;
db.students.find().count()
db.getCollectionNames()

查看集群状态

1
2
3
rs.help()
rs.status()
rs.conf()

以上为三节点集群配置。如果你的集群资源较少,并且其中一台主机不想参与集群复制,出于节省资源的目的,可以让第三个节点成为Arbiter仲裁节点:因为偶数节点的复制集在发生脑裂时无法选举出主节点。

那么在上面集群基础上剔除一个节点出来作为Arbiter仲裁节点
在集群主节点上面执行剔除操作

1
rs.remove("10.10.10.30:27017")

回到仲裁节点

1
2
3
4
[root@node03 ~]# cat /etc/mongod.conf | grep dbPath
dbPath: /var/lib/mongo
[root@node03 ~]# rm -rf /var/lib/mongo/*
[root@node03 ~]# systemctl start mongod

再次回到主节点添加仲裁节点,并且可以在2个复制节点间主从切换测试了

1
2
3
rs.addArb("10.10.10.30:27017")
rs.status()
rs.stepDown() # 降级为从节点

手动调整优先级触发选举,优先级高会变为主节点, 需要在主节点上操作

1
2
3
4
5
rs.conf()   # 查看各节点ID
cfg=rs.conf()
cfg.members[ID].priority=8
rs.reconfig(cfg)
rs.isMaster()

总结

什么情况下会触发选举
1 副本集初始化时rs.initiate()
2 从节点联系不到主节点时
3 主节点收到rs.stepDown()命令时(主节点下线)
4 某从节点有更高的优先级且已经满足成为主节点的所有条件
5 主节点已经联系不到副本集的多数方

zookeeper kafka集群

kafka集群依赖zookeeper运行,新版本不依赖zookeeper,但不具备生产环境使用条件

zookeeper集群部署

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
https://zookeeper.apache.org/releases.html

https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper/zookeeper-3.6.3/apache-zookeeper-3.6.3-bin.tar.gz


edit conf/zoo.cfg
tickTime=2000
dataDir=/var/lib/zookeeper
clientPort=2181
initLimit=5
syncLimit=2
server.1=zoo1:2888:3888
server.2=zoo2:2888:3888
server.3=zoo3:2888:3888

./bin/zkServer.sh start conf/zoo.cfg

kafka集群部署

1
2
3
4
5
6
7
8
9
10
https://kafka.apache.org/documentation

https://www.apache.org/dyn/closer.cgi?path=/kafka/2.8.0/kafka_2.13-2.8.0.tgz

tar -xf kafka_2.13-2.8.0.tgz
cd kafka_2.13-2.8.0
cat config/server.properties
broker.id=0 # 每节点不同
zookeeper.connect=10.17.100.10:2181,10.17.100.20:2181,10.17.100.30:2181 # 相同配置
./bin/kafka-server-start.sh config/server.properties

总结

先写下

ansible awx

awx安装在kubernetes平台,先在kubernetes平台上跑一个operator, 这个operator其实就是一个kubernetes 自定义资源CRD,然后再跑一个awx CRD 实例,在这过程中需要PV卷

前提条件,利用nfs 提供PV卷

1
2
3
4
5
https://github.com/ansible/awx-operator

[root@k8smaster pvc]# showmount -e 12.19.2.10
Export list for 12.19.2.10:
/vols/v20 12.19.2.0/24

pv yaml文件定义

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pg
  labels:
    storesys: nfs
spec:
  capacity:
    storage: 10Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  - ReadWriteOnce
  - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 12.19.2.10
    path: /vols/v20

awx-operator.yaml文件

1
wget   https://raw.githubusercontent.com/ansible/awx-operator/devel/deploy/awx-operator.yaml

myawx.yaml

1
2
3
4
5
6
apiVersion: awx.ansible.com/v1beta1
kind: AWX
metadata:
name: awx
spec:
tower_ingress_type: Ingress

kubectl apply

1
2
3
4
5
6
kubectl apply  -f pg-pv.yaml
kubectl apply -f awx-operator.yaml
kubectl apply -f myawx.yaml
kubectl get pods
kubectl get svc
kubectl get ingress

界面访问

1
2
3
4
url access web;
username: admin
password: kubectl get secret awx-admin-password -o jsonpath='{.data.password}' | base64 --decode
gVEQRpzMtaFqLTung4TBF4b7HarECM7L

总结

sysctl TCP优化

内核参数优化

1
2
3
4
5
6
7
8
9
10
11
echo 2048 > /proc/sys/net/core/somaxconn 每一个端口最大的监听队列的长度
echo never > /sys/kernel/mm/transparent_hugepage/enabled 关闭透明页功能
vm.max_map_count=655350 限制一个进程可以拥有的VMA(虚拟内存区域)的数量
fs.file-max = 65536
同时修改vim /etc/security/limits.conf
*  soft  nofile  65536
*  hard  nofile  65536
* soft nproc 65535
* hard nproc 65535
* soft memlock unlimited
* hard memlock unlimited

每保持一个TCP连接,进程就要创建一个文件对象,约占内存3.3KB,4GB内存可以维持多达100万条长连接;如果客户端发送数据,还得为TCP对象开启接收缓冲区,这就增大了内存开销。Linux系统里,系统级、用户级、进程级都有最大打开文件数限制

  • 系统级: 当前系统可以打开的文件数,通过fs.file-max参数修改
  • 用户级: 指定用户可以打开的数量,通过/etc/security/limits.conf
  • 进程级: 单个进程可以打开的数量,通过fs.nr_open参数修改
1
2
3
4
为每一个TCP连接Socket分配的最少字节数或说是接收缓冲区,默认4K,最大可设8MB
net.ipv4.tcp_rmem = 4096(为TCP连接分配的最少字节数) 87380 8388608
net.core.rmem_default = 212992
net.core.rmem_max = 8388608
1
2
3
net.ipv4.tcp_wmem= 4096(发送缓存区最小值,默认4K) 65536  8388608
net.core.wmem_default = 212992
net.core.wmem_max = 8388608
1
ss -n | grep ESTAB |wc -l TCP活动连接数统计
1
2
3
4
5
6
7
8
9
10
11
12
13
[root@node03 ~]# slabtop 关注dentry/filp/TCP/sock_inode_cache指标
Active / Total Objects (% used) : 3721737 / 4145714 (89.8%)
Active / Total Slabs (% used) : 125168 / 125168 (100.0%)
Active / Total Caches (% used) : 95 / 131 (72.5%)
Active / Total Size (% used) : 821348.16K / 878889.63K (93.5%)
Minimum / Average / Maximum Object : 0.01K / 0.21K / 8.00K

OBJS ACTIVE USE OBJ SIZE SLABS OBJ/SLAB CACHE SIZE NAME
1928862 1578283 81% 0.10K 49458 39 197832K buffer_head
448077 447905 99% 0.19K 21337 21 85348K dentry (指标1)
298544 297412 99% 1.00K 18659 16 298544K filp(指标2)
288966 288966 100% 0.04K 2833 102 11332K TCP(指标3)
233070 233070 100% 0.12K 6855 34 27420K sock_inode_cache(指标4)

来个示例配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
[root@server01 ~]# sysctl   -p
net.core.somaxconn = 2048
net.core.rmem_default = 262144
net.core.wmem_default = 262144
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 20000
net.ipv4.tcp_rmem = 4096 4096 16777216
net.ipv4.tcp_wmem = 4096 4096 16777216
net.ipv4.tcp_mem = 786432 2097152 3145728
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_orphans = 131072
net.ipv4.ip_local_port_range = 1024 65535
fs.nr_open = 5000000
fs.file-max = 2000000
fs.inotify.max_user_watches = 16384
vm.max_map_count = 655360
[root@server01 ~]#
[root@server01 ~]#
[root@server01 ~]# cat /etc/security/limits.conf
* hard nofile 1024000
* soft nofile 1024000
* hard nproc unlimited
* soft nproc unlimited
* soft core 0
* hard core 0

总结

慢慢积累吧