forked from picoCTF/picoCTF
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdocker.py
240 lines (192 loc) · 7.9 KB
/
docker.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
import time
import logging
import docker
import urllib3
from flask import current_app
import api
from api import PicoException
from api.problem import get_instance_data
log = logging.getLogger(__name__)
# global docker clients
__client = None
# low-level Docker API Client: https://docker-py.readthedocs.io/en/stable/api.html?#
__api_client = None
def get_clients():
    """
    Get a high level and a low level docker client connection.

    Ensures that only one global docker client exists per thread. If the
    clients do not exist, new ones are created and returned.

    Returns:
        tuple of (docker.DockerClient, docker.APIClient)

    Raises:
        PicoException: if the docker daemon cannot be reached
    """
    global __client, __api_client
    if not __client or not __api_client:
        try:
            conf = current_app.config
            # use an explicit remote docker daemon per the configuration
            opts = ["DOCKER_HOST", "DOCKER_CA", "DOCKER_CLIENT", "DOCKER_KEY"]
            if all(o in conf for o in opts):
                host, ca, client, key = [conf[o] for o in opts]
                log.debug("Connecting to docker daemon with config")
                tls_config = docker.tls.TLSConfig(
                    ca_cert=ca, client_cert=(client, key), verify=True)
                __api_client = docker.APIClient(base_url=host, tls=tls_config)
                __client = docker.DockerClient(base_url=host, tls=tls_config)
            # Docker options not set in configuration so attempt to use unix socket
            else:
                log.debug("Connecting to docker daemon on local unix socket")
                __api_client = docker.APIClient(base_url="unix:///var/run/docker.sock")
                __client = docker.DockerClient(base_url="unix:///var/run/docker.sock")
            # ensure a responsive connection
            __client.ping()
        except docker.errors.APIError as e:
            # BUG FIX: "str" + exception raised TypeError inside this handler,
            # masking the real connection error. Use lazy %s formatting.
            log.debug("Could not connect to docker daemon: %s", e)
            raise PicoException(
                "On Demand backend unavailible. Please contact an admin."
            )
    return __client, __api_client
def ensure_consistency(tid):
    """
    Ensure consistency of a team's containers with ground truth from docker
    daemon. This catches scenarios where a container died, or was killed by
    timeout.

    Args:
        tid: The team id to lookup containers for
    Returns:
        list of currently running Container objects for the team
        (NOTE(review): list_containers_daemon returns None on daemon error,
        which would make the loop below raise — confirm callers expect that)
    """
    tracked_cids = {c['cid']: c for c in list_containers_db(tid)}
    actual = list_containers_daemon(tid)
    # ensure we are tracking all actual containers
    for container in actual:
        try:
            tracked_cids.pop(container.id)
        except KeyError:
            # container exists but is not in database so delete
            # BUG FIX: the id was passed as a %-format arg with no placeholder
            # in the message; use lazy %s formatting so it is actually logged
            log.debug("untracked: %s", container.id)
            delete(container.id)
    # remove any tracked containers that no longer exist
    for container_id in tracked_cids:
        # BUG FIX: same missing %s placeholder as above
        log.debug("tracked but non existent: %s", container_id)
        delete(container_id)
    return actual
def create(tid, image_name):
    """
    Start a new container from the specified image.

    Args:
        tid: The team id to lookup containers for
        image_name: the sha256 digest for the image to launch
    Returns:
        A dictionary containing success status and a user-facing message.
        success is False on any errors.
    """
    # Query information about the requested image to ensure it exists and get
    # problem and port mapping information
    db = api.db.get_conn()
    image_info = db.images.find_one({"digests": image_name})
    if image_info is None:
        return {"success": False, "message": "Invalid image"}
    pid = image_info["pid"]
    # Update database with ground truth
    existing_containers = ensure_consistency(tid)
    # Check if team has exceeded the number of allowed containers
    conf = current_app.config
    if "DOCKER_CONTAINERS_PER_TEAM" in conf:
        num_allowed = conf["DOCKER_CONTAINERS_PER_TEAM"]
        if len(existing_containers) >= num_allowed:
            msg = "On Demand Challenge Limit Reached.\nStop another challenge to start this challenge"
            return {"success": False, "message": msg}
    # check if a container already exists for this challenge
    if db.containers.find_one({"tid": tid, "pid": pid}) is not None:
        msg = "Challenge already running. Use reset to get a fresh version"
        return {"success": False, "message": msg}
    client, api_client = get_clients()
    # XXX: manage container longevity and deletion
    created_at = int(time.time())
    # owner/created_at labels let list_containers_daemon filter by team
    labels = {"owner": str(tid), "created_at": str(created_at)}
    try:
        container = client.containers.run(
            image=image_name,
            labels=labels,
            detach=True,
            remove=True,
            publish_all_ports=True)
    except docker.errors.APIError as e:
        # BUG FIX: e.explanation may be None, which made the old string
        # concatenation raise TypeError; lazy %s formatting handles it
        log.debug("error: %s", e.explanation)
        return {"success": False, "message": "Error starting On Demand Challenge"}
    # Generate final display ports for the container
    # ex: {'5555/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '32842'}]}
    ports = api_client.inspect_container(container.id)['NetworkSettings']['Ports']
    ports = {k.split("/")[0]: v[0]["HostPort"] for k, v in ports.items()}
    port_info = get_instance_data(pid, tid)["port_info"]
    display_ports = []
    for container_port, external_port in ports.items():
        info = port_info[container_port]
        display = {"desc": info["desc"], "msg": info["fmt"].format(port=external_port)}
        display_ports.append(display)
    # store container information in database
    data = {"cid": container.id,
            "ports": display_ports,
            "tid": tid,
            "pid": pid,
            "created_at": created_at,
            "expire_at": created_at + int(conf["DOCKER_TTL"])}
    # CONSISTENCY: use insert_one to match the modern PyMongo API used
    # elsewhere in this module (find_one/delete_many); Collection.insert
    # was deprecated and removed in PyMongo 4
    db.containers.insert_one(data)
    return {"success": True, "message": "Challenge started"}
def delete(cid):
    """
    Kills and removes a running container. Also updates the database.

    Args:
        cid: container id to stop and remove from database
    Returns:
        True on success (including when the container was already gone),
        False if the docker daemon reported an API error
    """
    client, _ = get_clients()
    try:
        # kill and remove container on docker daemon
        container = client.containers.get(cid)
        container.remove(force=True)
    except docker.errors.NotFound:
        # already gone on the daemon; still remove the stale DB record below
        # BUG FIX: cid was passed as a %-format arg with no placeholder
        log.debug("container not found: %s", cid)
    except docker.errors.APIError as e:
        # BUG FIX: e.explanation may be None; avoid str concatenation
        log.debug("docker error: %s", e.explanation)
        return False
    # also remove from database
    db = api.db.get_conn()
    db.containers.delete_many({"cid": cid})
    return True
def list_containers_daemon(tid):
    """
    List the currently running containers for a team. Checks ground truth by
    querying the docker daemon.

    Args:
        tid: The team id to lookup containers for
    Returns:
        list of Container objects, or None on error
    """
    try:
        client, _ = get_clients()
        # containers are tagged with an "owner" label at creation time
        filters = {"label": "owner={}".format(tid)}
        existing = client.containers.list(filters=filters)
    except docker.errors.APIError as e:
        # BUG FIX: e.explanation may be None, which made the old string
        # concatenation raise TypeError; lazy %s formatting handles it
        log.debug("error: %s", e.explanation)
        return None
    return existing
def list_containers_db(tid):
    """
    List the currently running containers for a team, as tracked in the
    database.

    This metadata may be stale, but consistency is refreshed on any
    container-related request for the given team, so it is appropriate for
    non-container related functions.

    Args:
        tid: The team id to lookup containers for
    Returns:
        mongo cursor (iterator) over the tracked containers
    """
    conn = api.db.get_conn()
    query = {"tid": tid}
    return conn.containers.find(query)
def submission_to_cid(tid, pid):
    """
    Check containers collection for a given team id and problem id pair.

    Args:
        tid: The team id to lookup containers for
        pid: The problem id to lookup containers for
    Returns:
        mongo cursor (iterator) over the tracked containers
    """
    conn = api.db.get_conn()
    # exclude the internal mongo _id field from results
    selector = {"tid": tid, "pid": pid}
    return conn.containers.find(selector, {"_id": 0})