docker-compose.yml
version: '3.7'

volumes:
  node_modules:
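# x-backend is a Compose extension field: the &backend anchor below is shared
# configuration, merged into the backend and dev services further down via `<<: *backend`.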
x-backend:
  &backend
  build:
    dockerfile: ./Dockerfile.dev
    context: ./backend-clapy
  user: "${UID:-$(id -u)}:${GID:-1000}"
  volumes:
    - node_modules:/app/node_modules
    - ./backend-clapy:/app
    - ./figma-plugin-clapy/src/components:/plugin/components
  env_file: .env
  environment:
    ISDOCKER: 'true'
    VITE_HASURA_HOSTNAME: hasura
    VITE_HASURA_PORT: ${HASURA_DOCKER_PORT}
    POSTGRES_HOST: db
    POSTGRES_PORT_CONSUMER: ${POSTGRES_PORT_CONTAINER}
  depends_on:
    - "db"
  ports:
    - '4141:4141'
    - '9229:9229'
  # cap_add:
  #   - SYS_ADMIN
  # Change the command to run either the web service or a separate script.
  command: yarn start:docker:debug
  # command: yarn start:docker:debug:main2
  # command: ls -l
  deploy:
    resources:
      limits:
        # Should be 512M to match Cloud Run, but the rebuild in watch mode seems to consume more and needs the extra memory.
        memory: 700M
services:

  db:
    image: postgres:14-alpine
    container_name: clapy_db
    env_file: .env
    ports:
      - '${POSTGRES_PORT_CONSUMER}:${POSTGRES_PORT_CONTAINER}'
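  # Stripe CLI listener: forwards Stripe webhook events to the local backend on port 4141.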
  stripe:
    image: stripe/stripe-cli:latest
    command: "listen --api-key ${STRIPE_SECRET_KEY} --device-name docker-stripe-cli --forward-to host.docker.internal:4141/stripe/webhook/"
    env_file: .env
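  # Hasura GraphQL engine for local development. The cli-migrations image applies the
  # migrations and metadata mounted below on startup; JWTs are verified against the Auth0 JWKS endpoint.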
  hasura:
    # See db/README.md to update the version number.
    image: hasura/graphql-engine:v2.10.1.cli-migrations-v3
    container_name: clapy_hasura
    ports:
      - '${VITE_HASURA_PORT}:${HASURA_DOCKER_PORT}'
    depends_on:
      - "db"
    volumes:
      - ./db/hasura/metadata:/hasura-metadata
      - ./db/hasura/migrations:/hasura-migrations
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
    env_file: .env
    environment:
      HASURA_GRAPHQL_METADATA_DATABASE_URL: "postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT_CONTAINER}/${POSTGRES_DB}"
      # HASURA_GRAPHQL_DATABASE_URL: "postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT_CONTAINER}/${POSTGRES_DB}"
      PG_DATABASE_URL: "postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT_CONTAINER}/${POSTGRES_DB}"
      HASURA_GRAPHQL_ENABLE_CONSOLE: "false"
      ## enable debugging mode. It is recommended to disable this in production
      HASURA_GRAPHQL_DEV_MODE: "true"
      HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup, http-log, webhook-log, websocket-log, query-log
      HASURA_GRAPHQL_UNAUTHORIZED_ROLE: anonymous
      HASURA_GRAPHQL_NO_OF_RETRIES: 1000
      HASURA_GRAPHQL_JWT_SECRET: "{\"jwk_url\": \"https://${VITE_AUTH0_DOMAIN}/.well-known/jwks.json\", \"type\": \"RS256\", \"audience\": \"${VITE_AUTH0_AUDIENCE}\", \"issuer\": \"https://${VITE_AUTH0_DOMAIN}/\"}"
    # restart: on-failure
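  # Production-like Hasura variant, built from the image defined in ./db, with the console enabled.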
  hasuraprod:
    build:
      context: ./db
    container_name: clapy_hasuraprod
    ports:
      - '${VITE_HASURA_PORT}:${HASURA_DOCKER_PORT}'
    depends_on:
      - "db"
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
    env_file: .env
    environment:
      HASURA_GRAPHQL_METADATA_DATABASE_URL: "postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT_CONTAINER}/${POSTGRES_DB}"
      PG_DATABASE_URL: "postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT_CONTAINER}/${POSTGRES_DB}"
      HASURA_GRAPHQL_ENABLE_CONSOLE: "true"
      ## enable debugging mode. It is recommended to disable this in production
      HASURA_GRAPHQL_DEV_MODE: "true"
      HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup, http-log, webhook-log, websocket-log, query-log
      HASURA_GRAPHQL_UNAUTHORIZED_ROLE: anonymous
      HASURA_GRAPHQL_NO_OF_RETRIES: 1000
      HASURA_GRAPHQL_JWT_SECRET: "{\"jwk_url\": \"https://${VITE_AUTH0_DOMAIN}/.well-known/jwks.json\", \"type\": \"RS256\", \"audience\": \"${VITE_AUTH0_AUDIENCE}\", \"issuer\": \"https://${VITE_AUTH0_DOMAIN}/\"}"
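  # backend and dev both reuse the shared x-backend configuration via the YAML merge key;
  # dev only overrides the container name and the command.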
  backend:
    <<: *backend
    container_name: clapy_backend

  dev:
    <<: *backend
    container_name: dev
    command: yarn start:docker:debug:main2
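  # pgAdmin web UI for inspecting the Postgres database, exposed on http://localhost:5409.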
  pgadmin:
    image: dpage/pgadmin4:latest
    container_name: clapy_pgadmin
    env_file: .env
    depends_on:
      - "db"
    ports:
      - "5409:80"
  backendprod:
    build:
      dockerfile: ./backend-clapy/Dockerfile
      context: .
    env_file: .env
    environment:
      ISDOCKER: 'true'
      VITE_HASURA_HOSTNAME: hasura
      VITE_HASURA_PORT: ${HASURA_DOCKER_PORT}
      POSTGRES_HOST: db
      POSTGRES_PORT_CONSUMER: ${POSTGRES_PORT_CONTAINER}
    depends_on:
      - "db"
    ports:
      - '4141:4141'
      - '9229:9229'
    command: yarn start:prod
    # command: ls -la
    deploy:
      resources:
        limits:
          # Should be 512M to match Cloud Run, but the rebuild in watch mode seems to consume more and needs the extra memory.
          memory: 700M
  ### Check tutorial for setup: https://cloud.google.com/sql/docs/postgres/connect-admin-ip?hl=fr
  # proxy-cloud-sql:
  #   image: gcr.io/cloudsql-docker/gce-proxy:latest
  #   ports:
  #     - 127.0.0.1:3307:5432
  #   volumes:
  #     - ./credentials/cloud-sql-proxy-service-account-key.json:/config:cached
  #   command: /cloud_sql_proxy -instances=clapy-production:europe-west1:clapy=tcp:0.0.0.0:5432 -credential_file=/config
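# Example usage (a sketch, assuming Docker Compose v2 and a populated .env file):
#   docker compose up db hasura backend           # local dev stack, backend debug port on :9229
#   docker compose up db hasuraprod backendprod   # production-like images
#   docker compose up pgadmin                     # pgAdmin UI on http://localhost:5409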