
Commit affe1f3 (1 parent: 0b13435)

remove s3 docker, and fix ci npm release

7 files changed (+144, -40 lines)

.github/workflows/spa-client-release-npm.yml (+1 -1)

@@ -15,7 +15,7 @@ jobs:
           registry-url: 'https://registry.npmjs.org'
           cache: 'npm'
           cache-dependency-path: './jsclient/package-lock.json'
-      - run: npm ci && npm build && npm publish
+      - run: npm ci && npm run build && npm publish
         working-directory: ./jsclient
         env:
           NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
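Note on the workflow fix: `npm build` does not run a package's "build" script (newer npm versions reject it outright as an unknown command), so the publish step ran against unbuilt sources. `npm run build` executes the script defined in jsclient/package.json. A minimal sketch of the corrected sequence run by hand, assuming package.json defines a "build" script and an npm token is configured:

    # Reproduce the CI publish steps locally (sketch).
    cd jsclient
    npm ci            # clean install from package-lock.json
    npm run build     # "npm build" would NOT have run this script
    npm publish       # publishes to registry.npmjs.org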

Makefile (-3)

@@ -17,9 +17,6 @@ ifeq ($(VERSION), )
 else
 	DOCKER_BUILDKIT=1 docker build . -t="ghcr.io/fornetcode/spa-server:$(VERSION)"
 	docker push fornetcode/spa-server:$(VERSION)
-	cd docker
-	DOCKER_BUILDKIT=1 docker build . -f S3FS.Dockerfile -t="ghcr.io/fornetcode/spa-server:$(VERSION)-s3"
-	docker push ghcr.io/fornetcode/spa-server:$(VERSION)-s3
 endif

 release-doc:
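With the S3 image gone, the release recipe builds and pushes only the plain image. Run by hand it is roughly the sketch below (VERSION is supplied by the caller; note that the surviving push line, as committed, lacks the ghcr.io/ prefix used on the build line, so the sketch adds it on the assumption that GitHub Packages is the intended registry):

    # Manual equivalent of the surviving recipe (sketch).
    VERSION=2.0.0
    DOCKER_BUILDKIT=1 docker build . -t="ghcr.io/fornetcode/spa-server:${VERSION}"
    docker push "ghcr.io/fornetcode/spa-server:${VERSION}"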

README.md (+1 -1)

@@ -11,7 +11,7 @@ It is to provide a static web http server with cache and hot reload.
 - Hot reload support(Mac and Linux).
 - CORS support.
 - Http auto redirect to https.
-- Docker support(compressed size: 32M), and support S3 as storage by S3FS.
+- Docker support(compressed size: 32M)
 - Provide command line/npm package to deploy spa.
 - Multiple configs for different domain.

README_CN.md (+1 -1)

@@ -10,7 +10,7 @@
 - 支持热更新(Mac and Linux)。
 - 支持 CORS 跨域
 - http/https 同时服务(http 也可返回 redirect https)。
-- 支持 Docker 镜像(压缩后大小:32M), 并通过S3FS 支持 S3 作为数据存储
+- 支持 Docker 镜像(压缩后大小:32M)
 - 提供 命令行/npm包 客户端,一行命令部署
 - 每个域名可拥有独立的配置

docker/S3FS.Dockerfile (+1 -1)

@@ -4,7 +4,7 @@ ARG VERSION=2.0.0
 FROM ${BASE_IMAGE}:${VERSION} as Source


-FROM panubo/s3fs:1.87
+FROM efrecon/s3fs:1.94
 COPY --from=Source /test/config.conf /config/config.conf
 COPY --from=Source /usr/bin/spa-server /usr/bin/spa-server

docker/entry.sh (+140 -30)

@@ -1,43 +1,153 @@
-#!/usr/bin/env bash
-# This is FROM S3FS
-set -e
-[ "${DEBUG:-false}" == 'true' ] && { set -x; S3FS_DEBUG='-d -d'; }
+#!/bin/sh
 
-# Defaults
-: ${AWS_S3_AUTHFILE:='/root/.s3fs'}
-: ${AWS_S3_MOUNTPOINT:='/mnt'}
-: ${AWS_S3_URL:='https://s3.amazonaws.com'}
-: ${S3FS_ARGS:=''}
+# Failsafe: Stop on errors and unset variables.
+set -eu
 
-# If no command specified, print error
-[ "$1" == "" ] && set -- "$@" bash -c 'echo "Error: Please specify a command to run."; exit 128'
+# Debug
+S3FS_DEBUG=${S3FS_DEBUG:-"0"}
 
-# Configuration checks
-if [ -z "$AWS_STORAGE_BUCKET_NAME" ]; then
-    echo "Error: AWS_STORAGE_BUCKET_NAME is not specified"
-    exit 128
+# Env file
+AWS_S3_ENVFILE=${AWS_S3_ENVFILE:-""}
+
+_verbose() {
+    if [ "$S3FS_DEBUG" = "1" ]; then
+        printf %s\\n "$1" >&2
+    fi
+}
+
+_error() {
+    printf %s\\n "$1" >&2
+    exit 1
+}
+
+# Read the content of the environment file, i.e. a file used to set the value of
+# all/some variables.
+if [ -n "$AWS_S3_ENVFILE" ]; then
+    # Read and export lines that set variables in all-caps and starting with
+    # S3FS_ or AWS_ from the configuration file. This is a security measure to
+    # crudely protect against evaluating some evil code (but it will still
+    # evaluate code as part of the value, so use it with care!)
+    _verbose "Reading configuration from $AWS_S3_ENVFILE"
+    while IFS= read -r line; do
+        eval export "$line"
+    done <<EOF
+$(grep -E '^(S3FS|AWS_S3)_[A-Z_]+=' "$AWS_S3_ENVFILE")
+EOF
+fi
+
+# S3 main URL
+AWS_S3_URL=${AWS_S3_URL:-"https://s3.amazonaws.com"}
+
+# Root directory for settings and bucket.
+AWS_S3_ROOTDIR=${AWS_S3_ROOTDIR:-"/opt/s3fs"}
+
+# Where are we going to mount the remote bucket resource in our container.
+AWS_S3_MOUNT=${AWS_S3_MOUNT:-"${AWS_S3_ROOTDIR%/}/bucket"}
+
+# Authorisation details
+AWS_S3_ACCESS_KEY_ID=${AWS_S3_ACCESS_KEY_ID:-""}
+AWS_S3_ACCESS_KEY_ID_FILE=${AWS_S3_ACCESS_KEY_ID_FILE:-""}
+AWS_S3_SECRET_ACCESS_KEY=${AWS_S3_SECRET_ACCESS_KEY:-""}
+AWS_S3_SECRET_ACCESS_KEY_FILE=${AWS_S3_SECRET_ACCESS_KEY_FILE:-""}
+AWS_S3_AUTHFILE=${AWS_S3_AUTHFILE:-""}
+
+# Check variables and defaults
+if [ -z "$AWS_S3_ACCESS_KEY_ID" ] && \
+   [ -z "$AWS_S3_ACCESS_KEY_ID_FILE" ] && \
+   [ -z "$AWS_S3_SECRET_ACCESS_KEY" ] && \
+   [ -z "$AWS_S3_SECRET_ACCESS_KEY_FILE" ] && \
+   [ -z "$AWS_S3_AUTHFILE" ]; then
+    _error "You need to provide some credentials!!"
+fi
+if [ -z "${AWS_S3_BUCKET}" ]; then
+    _error "No bucket name provided!"
+fi
+
+# Read AWS S3 Access Key ID from file
+if [ -n "${AWS_S3_ACCESS_KEY_ID_FILE}" ]; then
+    # shellcheck disable=SC2229 # We WANT to read the content of the file pointed by the variable!
+    read -r AWS_S3_ACCESS_KEY_ID < "${AWS_S3_ACCESS_KEY_ID_FILE}"
+fi
+
+# Read AWS S3 Secret Access Key from file
+if [ -n "${AWS_S3_SECRET_ACCESS_KEY_FILE}" ]; then
+    # shellcheck disable=SC2229 # We WANT to read the content of the file pointed by the variable!
+    read -r AWS_S3_SECRET_ACCESS_KEY < "${AWS_S3_SECRET_ACCESS_KEY_FILE}"
+fi
+
+# Create or use authorisation file
+if [ -z "${AWS_S3_AUTHFILE}" ]; then
+    AWS_S3_AUTHFILE=${AWS_S3_ROOTDIR%/}/passwd-s3fs
+    echo "${AWS_S3_ACCESS_KEY_ID}:${AWS_S3_SECRET_ACCESS_KEY}" > "${AWS_S3_AUTHFILE}"
+    chmod 600 "${AWS_S3_AUTHFILE}"
+fi
+
+# Forget about the secret once done (this will have proper effects when the
+# PASSWORD_FILE-version of the setting is used)
+if [ -n "${AWS_S3_ACCESS_KEY_ID}" ]; then
+    unset AWS_S3_ACCESS_KEY_ID
+fi
+
+# Forget about the secret once done (this will have proper effects when the
+# PASSWORD_FILE-version of the setting is used)
+if [ -n "${AWS_S3_SECRET_ACCESS_KEY}" ]; then
+    unset AWS_S3_SECRET_ACCESS_KEY
+fi
+
+# Create destination directory if it does not exist.
+if [ ! -d "$AWS_S3_MOUNT" ]; then
+    mkdir -p "$AWS_S3_MOUNT"
+fi
+
+# Add a group, default to naming it after the GID when not found
+GROUP_NAME=$(getent group "$GID" | cut -d":" -f1)
+if [ "$GID" -gt 0 ] && [ -z "$GROUP_NAME" ]; then
+    _verbose "Add group $GID"
+    addgroup -g "$GID" -S "$GID"
+    GROUP_NAME=$GID
 fi
 
-if [ ! -f "${AWS_S3_AUTHFILE}" ] && [ -z "$AWS_ACCESS_KEY_ID" ]; then
-    echo "Error: AWS_ACCESS_KEY_ID not specified, or ${AWS_S3_AUTHFILE} not provided"
-    exit 128
+# Add a user, default to naming it after the UID.
+RUN_AS=${RUN_AS:-""}
+if [ "$UID" -gt 0 ]; then
+    USER_NAME=$(getent passwd "$UID" | cut -d":" -f1)
+    if [ -z "$USER_NAME" ]; then
+        _verbose "Add user $UID, turning on rootless-mode"
+        adduser -u "$UID" -D -G "$GROUP_NAME" "$UID"
+    else
+        _verbose "Running as user $UID, turning on rootless-mode"
+    fi
+    RUN_AS=$UID
+    chown "${UID}:${GID}" "$AWS_S3_MOUNT" "${AWS_S3_AUTHFILE}" "$AWS_S3_ROOTDIR"
 fi
 
-if [ ! -f "${AWS_S3_AUTHFILE}" ] && [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
-    echo "Error: AWS_SECRET_ACCESS_KEY not specified, or ${AWS_S3_AUTHFILE} not provided"
-    exit 128
+# Debug options
+DEBUG_OPTS=
+if [ "$S3FS_DEBUG" = "1" ]; then
+    DEBUG_OPTS="-d -d"
 fi
 
-# Write auth file if it does not exist
-if [ ! -f "${AWS_S3_AUTHFILE}" ]; then
-    echo "${AWS_ACCESS_KEY_ID}:${AWS_SECRET_ACCESS_KEY}" > ${AWS_S3_AUTHFILE}
-    chmod 400 ${AWS_S3_AUTHFILE}
+# Additional S3FS options
+if [ -n "$S3FS_ARGS" ]; then
+    S3FS_ARGS="-o $S3FS_ARGS"
 fi
 
-echo "==> Mounting S3 Filesystem"
-mkdir -p ${AWS_S3_MOUNTPOINT}
+# Mount as the requested user.
+_verbose "Mounting bucket ${AWS_S3_BUCKET} onto ${AWS_S3_MOUNT}, owner: $UID:$GID"
+su - $RUN_AS -c "s3fs $DEBUG_OPTS ${S3FS_ARGS} \
+    -o passwd_file=${AWS_S3_AUTHFILE} \
+    -o url=${AWS_S3_URL} \
+    -o uid=$UID \
+    -o gid=$GID \
+    ${AWS_S3_BUCKET} ${AWS_S3_MOUNT}"
 
-# s3fs mount command
-s3fs $S3FS_DEBUG $S3FS_ARGS -o passwd_file=${AWS_S3_AUTHFILE} -o url=${AWS_S3_URL} ${AWS_STORAGE_BUCKET_NAME} ${AWS_S3_MOUNTPOINT}
+# s3fs can claim to have a mount even though it didn't succeed. Doing an
+# operation actually forces it to detect that and remove the mount.
+su - $RUN_AS -c "stat ${AWS_S3_MOUNT}"
 
-exec /spa-server
+if healthcheck.sh; then
+    echo "Mounted bucket ${AWS_S3_BUCKET} onto ${AWS_S3_MOUNT}"
+    exec /spa-server "$@"
+else
+    _error "Mount failure"
+fi
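The rewritten entry script reads its configuration from AWS_S3_*/S3FS_* environment variables (optionally loaded from a file named by AWS_S3_ENVFILE), writes the passwd-s3fs credentials file, can drop to a non-root UID/GID, mounts the bucket with s3fs, and finally execs /spa-server. A hypothetical invocation of the resulting image; bucket, credentials, and tag are placeholders, and the FUSE device plus SYS_ADMIN capability are generally required for s3fs to mount inside a container:

    # Run the S3 variant (sketch; all values are placeholders).
    docker run --rm \
        --device /dev/fuse --cap-add SYS_ADMIN \
        -e AWS_S3_BUCKET=my-bucket \
        -e AWS_S3_ACCESS_KEY_ID=AKIAEXAMPLE \
        -e AWS_S3_SECRET_ACCESS_KEY=example-secret \
        -e UID=1000 -e GID=1000 \
        ghcr.io/fornetcode/spa-server:2.0.0-s3

The same settings can come from a file, since the script exports only lines matching ^(S3FS|AWS_S3)_[A-Z_]+=. A sketch of such a file (the path and the *_FILE secret locations are assumptions):

    # /config/s3.env -- pass via -e AWS_S3_ENVFILE=/config/s3.env
    AWS_S3_BUCKET=my-bucket
    AWS_S3_ACCESS_KEY_ID_FILE=/run/secrets/s3_key_id
    AWS_S3_SECRET_ACCESS_KEY_FILE=/run/secrets/s3_secret
    S3FS_ARGS=use_path_request_style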

docs/guide/spa-server-release-package.md (-3)

@@ -2,9 +2,6 @@
 ## Docker Image
 The docker image is distributed at `Github Packages` as `ghcr.io/fornetcode/spa-server`.
 
-### AWS S3 Support
-We support S3 storage by docker `panubo/docker-s3fs`, and release as `ghcr.io/fornetcode/spa-server:${version}-s3`, all configure about S3fs fuse can be found [here](https://github.com/panubo/docker-s3fs).
-
 ## From Code
 There no plan to release binary package. You can `git clone` the code and build yourself.
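Since no binary release is planned, building from source is the remaining route. A sketch, assuming the repository URL matches the image name and that this is a standard Rust/cargo project:

    # Build from source (sketch; URL and toolchain are assumptions).
    git clone https://github.com/fornetcode/spa-server.git
    cd spa-server
    cargo build --release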
