collapse everything into one docker-compose environment and enable prometheus logging

single-dockerfile
Ubuntu 2 years ago
parent 250030fe7a
commit 7a0a530add
53 changed files (lines changed per file):

    3   .gitignore
   87   Makefile
    1   README.md
   34   env.production
   41   grafana.yaml
   26   grafana/docker-compose.yaml
    0   grafana/env.production
    4   grafana/keycloak.sh
    6   grafana/nginx.conf
   50   grafana/setup
   58   hedgedoc.yaml
   34   hedgedoc/docker-compose.yaml
    2   hedgedoc/env.production
   50   hedgedoc/keycloak.sh
    8   hedgedoc/nginx.conf
   69   hedgedoc/setup
   63   keycloak.yaml
    5   keycloak/README.md
   44   keycloak/client-create
   40   keycloak/client-delete
   43   keycloak/docker-compose.yaml
    0   keycloak/env.production
    4   keycloak/nginx.conf
  119   keycloak/setup
  198   mastodon.yaml
    3   mastodon/README.md
  131   mastodon/docker-compose.yaml
   38   mastodon/entrypoint.sh
    9   mastodon/env.production
    3   mastodon/keycloak.sh
  154   mastodon/nginx.conf
   78   mastodon/setup
   30   nginx.yaml
   30   nginx/Dockerfile
   34   nginx/certbot-renew
   41   nginx/default.conf
   28   nginx/docker-compose.yaml
   12   nginx/docker-entrypoint.d/01-collectd.sh
   31   nginx/docker-entrypoint.d/10-createkey.sh
   39   nginx/docker-entrypoint.d/20-envsubst-on-templates.sh
   47   nginx/docker-entrypoint.sh
    0   nginx/env.production
    0   nginx/etc/includes/challenge.conf
    0   nginx/etc/includes/options-ssl-nginx.conf
    0   nginx/etc/includes/ssl-dhparams.pem
   25   nginx/etc/nginx.conf
   30   nginx/nginx/templates/pixelfed.conf.template
   41   nginx/nginx/templates/social.conf.template
   39   nginx/setup
   18   prometheus.yaml
   21   prometheus/prometheus.yaml
   39   start-all
    7   stop-all

.gitignore

@@ -2,3 +2,6 @@
 data
 *.secrets
 env.smtp
+*.old
+*.log
+test

@@ -0,0 +1,87 @@
MODULES += nginx
MODULES += keycloak
MODULES += hedgedoc
MODULES += grafana
MODULES += prometheus
MODULES += mastodon
#MODULES += pixelfed

include env.production
domain_name := $(DOMAIN_NAME)

help:
	@echo "usage: make run"

UC = $(shell echo '$1' | tr '[:lower:]' '[:upper:]')

DOCKER = \
	$(foreach m,$(MODULES),. data/$m/secrets && ) \
	docker-compose \
		--env-file env.production \
		$(foreach m,$(MODULES),--file ./$m.yaml)

run:
	$(DOCKER) up
down:
	$(DOCKER) down

nginx-shell:
	$(DOCKER) exec nginx sh
grafana-shell:
	$(DOCKER) exec grafana bash
hedgedoc-shell:
	$(DOCKER) exec hedgedoc sh
keycloak-shell:
	$(DOCKER) exec keycloak sh
mastodon-shell:
	$(DOCKER) exec mastodon bash
mastodon-streaming-shell:
	$(DOCKER) exec mastodon-streaming bash

nginx-build: data/nginx/secrets
	$(DOCKER) build nginx

certdir = ./data/certbot/conf/live/${DOMAIN_NAME}

run: secrets-setup
secrets-setup: $(foreach m,$(MODULES),data/$m/secrets)

# Create the per-subdomain secrets if they don't exist
# not every service requires all of these features, but create them anyway
GET_MODULE = $(call UC,$(word 2,$(subst /, ,$@)))
RAND = $$(openssl rand -hex $1)

data/%/secrets:
	mkdir -p $(dir $@)
	echo >$@ "# DO NOT CHECK IN"
	echo >>$@ "export $(GET_MODULE)_ADMIN_PASSWORD=$(call RAND,8)"
	echo >>$@ "export $(GET_MODULE)_CLIENT_SECRET=$(call RAND,20)"
	echo >>$@ "export $(GET_MODULE)_SESSION_SECRET=$(call RAND,20)"

keycloak-setup: secrets-setup
	$(DOCKER) run keycloak-setup

certbot:
	$(DOCKER) \
	run --entrypoint '/bin/sh -c "\
		rm -rf /etc/letsencrypt ; \
		certbot certonly \
			--webroot \
			--webroot-path /var/www/certbot \
			--email "admin@$(DOMAIN_NAME)" \
			--rsa-key-size "2048" \
			--agree-tos \
			--no-eff-email \
			--force-renewal \
			-d $(DOMAIN_NAME) \
			$(foreach m,$(MODULES),\
				-d $($(call UC,$m)_HOSTNAME).$(DOMAIN_NAME)) \
	"' certbot

nginx-reload:
	$(DOCKER) restart nginx

config:
	$(DOCKER) config

FORCE:
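Every target above funnels through the same `$(DOCKER)` wrapper, which sources each module's generated secrets and then merges all of the per-service compose files into one project. As a rough sketch (hypothetical expansion, assuming the six modules listed and their `data/<module>/secrets` files already exist), `make run` ends up executing something like:

```
. data/nginx/secrets && . data/keycloak/secrets && . data/hedgedoc/secrets && \
. data/grafana/secrets && . data/prometheus/secrets && . data/mastodon/secrets && \
docker-compose \
	--env-file env.production \
	--file ./nginx.yaml --file ./keycloak.yaml --file ./hedgedoc.yaml \
	--file ./grafana.yaml --file ./prometheus.yaml --file ./mastodon.yaml \
	up
```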

@@ -8,6 +8,7 @@ Infrastructure for the self-hosted, single-sign-on, community-run services.
 ```
 apt install jq docker-compose
+apt install prometheus
 ```
 * Setup each of the services. `keycloak` and `nginx` are required to start the others:

@@ -1,12 +1,26 @@
-DOMAIN_NAME=hackerspace.zone
+# Fill in with your top-level domain name and desired OAUTH realm name
+DOMAIN_NAME=dev.v.st
 REALM=hackerspace
-KEYCLOAK_HOSTNAME=login.hackerspace.zone
-HEDGEDOC_HOSTNAME=docs.hackerspace.zone
-MASTODON_HOSTNAME=social.hackerspace.zone
-NEXTCLOUD_HOSTNAME=cloud.hackerspace.zone
-GRAFANA_HOSTNAME=dashboard.hackerspace.zone
-GITEA_HOSTNAME=git.hackerspace.zone
-MATRIX_HOSTNAME=matrix.hackerspace.zone
-MOBILIZON_HOSTNAME=events.hackerspace.zone
-PIXELFED_HOSTNAME=pixelfed.hackerspace.zone
+
+# Fill in with your SMTP server, if you have one
+SMTP_SERVER=
+SMTP_USER=
+SMTP_PASSWORD=
+SMTP_PORT=
+
+# You can leave these as is or change them if you like
+NGINX_HOSTNAME=www
+KEYCLOAK_HOSTNAME=login
+HEDGEDOC_HOSTNAME=docs
+MASTODON_HOSTNAME=social
+NEXTCLOUD_HOSTNAME=cloud
+GRAFANA_HOSTNAME=dashboard
+GITEA_HOSTNAME=git
+MATRIX_HOSTNAME=matrix
+MOBILIZON_HOSTNAME=events
+PIXELFED_HOSTNAME=pixelfed
+PROMETHEUS_HOSTNAME=metrics
+
+AUTH_URL=https://${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME}/realms/${REALM}/protocol/openid-connect/auth
+TOKEN_URL=https://${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME}/realms/${REALM}/protocol/openid-connect/token
+USERINFO_URL=https://${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME}/realms/${REALM}/protocol/openid-connect/userinfo
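With hypothetical values of `DOMAIN_NAME=example.com` and the defaults above, the three derived URLs point at the standard Keycloak OpenID Connect endpoints for the realm. A quick sanity check (a sketch, assuming the realm already exists and `jq` is installed per the README) is to compare them against the realm's discovery document:

```
curl -s https://login.example.com/realms/hackerspace/.well-known/openid-configuration \
	| jq '.authorization_endpoint, .token_endpoint, .userinfo_endpoint'
# "https://login.example.com/realms/hackerspace/protocol/openid-connect/auth"
# "https://login.example.com/realms/hackerspace/protocol/openid-connect/token"
# "https://login.example.com/realms/hackerspace/protocol/openid-connect/userinfo"
```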

@ -0,0 +1,41 @@
version: "3"
services:
grafana:
image: grafana/grafana-oss:8.5.1
container_name: grafana
user: "0:0"
environment:
GF_AUTH_GENERIC_OAUTH_ENABLED: 'True'
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: 'True' # otherwise no login is possible
#GF_AUTH_GENERIC_OAUTH_TEAM_IDS: ''
#GF_AUTH_GENERIC_OAUTH_ALLOWED_ORGANIZATIONS: ''
#GF_AUTH_GENERIC_OAUTH_ALLOWED_DOMAINS: '<domains>'
#GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD} # ignored?
GF_AUTH_GENERIC_OAUTH_NAME: Keycloak
GF_AUTH_GENERIC_OAUTH_CLIENT_ID: grafana
GF_AUTH_GENERIC_OAUTH_SCOPES: openid profile email
GF_SERVER_ROOT_URL: https://${GRAFANA_HOSTNAME}.${DOMAIN_NAME}/
GF_SERVER_DOMAIN: ${GRAFANA_HOSTNAME}.${DOMAIN_NAME}
GF_AUTH_GENERIC_OAUTH_AUTH_URL: ${AUTH_URL}
GF_AUTH_GENERIC_OAUTH_TOKEN_URL: ${TOKEN_URL}
GF_AUTH_GENERIC_OAUTH_API_URL: ${USERINFO_URL}
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET: ${GRAFANA_CLIENT_SECRET}
# reset the admin password on every run, since otherwise it defaults to admin/admin
entrypoint: ["sh", "-c", "grafana-cli admin reset-admin-password ${GRAFANA_ADMIN_PASSWORD} && /run.sh"]
volumes:
- ./data/grafana:/var/lib/grafana
restart: always
# ports:
# - 3000:3000
# add the grafana nginx configuration into the nginx volume
nginx:
volumes:
- ./grafana/nginx.conf:/etc/nginx/templates/grafana.conf.template:ro
# add the grafana client secrets to the keycloak-setup volume
keycloak-setup:
env_file:
- data/grafana/secrets
volumes:
- ./grafana/keycloak.sh:/keycloak-setup/grafana.sh:ro
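Grafana reads each `GF_<SECTION>_<KEY>` environment variable as an override of the matching `grafana.ini` setting, so the block above is equivalent to filling in the `[server]` and `[auth.generic_oauth]` sections of the ini file. A small smoke test (a sketch, assuming the generated admin password is in `data/grafana/secrets` and that curl is available in the custom nginx image):

```
. data/grafana/secrets
docker-compose exec nginx \
	curl -s -u "admin:$GRAFANA_ADMIN_PASSWORD" http://grafana:3000/api/org
# {"id":1,"name":"Main Org."} if the password reset in the entrypoint took effect
```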

@ -1,26 +0,0 @@
version: "3"
services:
grafana:
image: grafana/grafana-oss:8.5.1
user: "0:0"
environment:
GF_AUTH_GENERIC_OAUTH_ENABLED: 'True'
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: 'True' # otherwise no login is possible
#GF_AUTH_GENERIC_OAUTH_TEAM_IDS: ''
#GF_AUTH_GENERIC_OAUTH_ALLOWED_ORGANIZATIONS: ''
#GF_AUTH_GENERIC_OAUTH_ALLOWED_DOMAINS: '<domains>'
GF_AUTH_GENERIC_OAUTH_NAME: Keycloak
GF_AUTH_GENERIC_OAUTH_CLIENT_ID: grafana
GF_AUTH_GENERIC_OAUTH_SCOPES: openid profile email
# GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET is in env.secrets
# auth URLs are in the env.secrets since they have hostname expansion
volumes:
- ../data/grafana:/var/lib/grafana
restart: always
ports:
- 8000:3000
env_file:
- ../env.production
- env.production
- ../data/grafana/env.secrets

@ -0,0 +1,4 @@
#!/bin/bash -x
# Setup the grafana client connection
client-create grafana "$GRAFANA_HOSTNAME.$DOMAIN_NAME" "$GRAFANA_CLIENT_SECRET" </dev/null

@@ -4,7 +4,7 @@ map $http_upgrade $connection_upgrade {
 }

 server {
-    server_name ${GRAFANA_HOSTNAME};
+    server_name ${GRAFANA_HOSTNAME} ${GRAFANA_HOSTNAME}.${DOMAIN_NAME};
     client_max_body_size 128m;
     sendfile on;
@@ -24,7 +24,7 @@ server {
     chunked_transfer_encoding on;

     location / {
-        proxy_pass http://host.docker.internal:8000;
+        proxy_pass http://grafana:3000;
         proxy_set_header Host $host;
         proxy_set_header X-Real-IP $remote_addr;
         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -32,7 +32,7 @@ server {
     }

     location /socket.io/ {
-        proxy_pass http://host.docker.internal:8000;
+        proxy_pass http://grafana:3000;
         proxy_set_header Host $host;
         proxy_set_header X-Real-IP $remote_addr;
         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

@ -1,50 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }
DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top level env?"
source env.production || die "no local env?"
BASE="https://$KEYCLOAK_HOSTNAME/realms/$REALM/protocol/openid-connect"
SECRETS="../data/grafana/env.secrets"
if [ -r "$SECRETS" ]; then
docker-compose up -d || die "grafana: unable to start container"
exit 0
fi
docker-compose down 2>/dev/null
GRAFANA_CLIENT_SECRET="$(openssl rand -hex 32)"
GRAFANA_ADMIN_PASSWORD="$(openssl rand -hex 4)"
echo "Generating secrets: admin password $GRAFANA_ADMIN_PASSWORD"
mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# Do not check in!
GF_SECURITY_ADMIN_PASSWORD=$GRAFANA_ADMIN_PASSWORD
GF_SERVER_ROOT_URL=https://$GRAFANA_HOSTNAME/
GF_SERVER_DOMAIN=$GRAFANA_HOSTNAME
GF_AUTH_GENERIC_OAUTH_AUTH_URL=$BASE/auth
GF_AUTH_GENERIC_OAUTH_TOKEN_URL=$BASE/token
GF_AUTH_GENERIC_OAUTH_API_URL=$BASE/userinfo
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET=$GRAFANA_CLIENT_SECRET
EOF
../keycloak/client-delete 'grafana' 2>/dev/null
../keycloak/client-create << EOF || die "unable to create client id"
{
"clientId": "grafana",
"rootUrl": "https://$GRAFANA_HOSTNAME/",
"adminUrl": "https://$GRAFANA_HOSTNAME/",
"redirectUris": [ "https://$GRAFANA_HOSTNAME/*" ],
"webOrigins": [ "https://$GRAFANA_HOSTNAME" ],
"clientAuthenticatorType": "client-secret",
"secret": "$GRAFANA_CLIENT_SECRET"
}
EOF
docker-compose up -d || die "grafana: unable to bring up container"

@ -0,0 +1,58 @@
version: '3.9'
services:
hedgedoc-db:
image: postgres:13.4-alpine
container_name: hedgedoc-db
environment:
- POSTGRES_USER=hedgedoc
- POSTGRES_PASSWORD=password
- POSTGRES_DB=hedgedoc
volumes:
- ./data/hedgedoc/database:/var/lib/postgresql/data
restart: always
hedgedoc:
# Make sure to use the latest release from https://hedgedoc.org/latest-release
image: quay.io/hedgedoc/hedgedoc:1.9.4
container_name: hedgedoc
environment:
#- CMD_CSP_ENABLE=false
- CMD_DB_URL=postgres://hedgedoc:password@hedgedoc-db:5432/hedgedoc
- CMD_PROTOCOL_USESSL=true
- CMD_ALLOW_ANONYMOUS=false # anonymous users can't create notes
- CMD_ALLOW_ANONYMOUS_EDITS=true # but they can be invited to edit notes
- CMD_ALLOW_FREEURL=true # users can create arbitrary names
- CMD_EMAIL=false # only oauth logins
- CMD_DOMAIN=${HEDGEDOC_HOSTNAME}.${DOMAIN_NAME}
- CMD_OAUTH2_AUTHORIZATION_URL=${AUTH_URL}
- CMD_OAUTH2_TOKEN_URL=${TOKEN_URL}
- CMD_OAUTH2_USER_PROFILE_URL=${USERINFO_URL}
- CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR=preferred_username
- CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR=name
- CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR=email
- CMD_OAUTH2_CLIENT_ID=hedgedoc
- CMD_OAUTH2_PROVIDERNAME=Keycloak
- CMD_OAUTH2_CLIENT_SECRET=${HEDGEDOC_CLIENT_SECRET}
- CMD_SESSION_SECRET=${HEDGEDOC_SESSION_SECRET}
env_file:
- env.production
volumes:
- ./data/hedgedoc/uploads:/hedgedoc/public/uploads
# ports:
#- "3000:3000"
restart: always
depends_on:
- hedgedoc-db
- keycloak
# add the hedgedoc nginx configuration into the nginx volume
nginx:
volumes:
- ./hedgedoc/nginx.conf:/etc/nginx/templates/hedgedoc.conf.template:ro
# add the hedgedoc client secrets to the keycloak-setup volume
keycloak-setup:
env_file:
- data/hedgedoc/secrets
volumes:
- ./hedgedoc/keycloak.sh:/keycloak-setup/hedgedoc.sh:ro

@ -1,34 +0,0 @@
version: '3'
services:
database:
image: postgres:13.4-alpine
environment:
- POSTGRES_USER=hedgedoc
- POSTGRES_PASSWORD=password
- POSTGRES_DB=hedgedoc
volumes:
- ../data/hedgedoc/database:/var/lib/postgresql/data
restart: always
hedgedoc:
# Make sure to use the latest release from https://hedgedoc.org/latest-release
image: quay.io/hedgedoc/hedgedoc:1.9.3
env_file:
- ../env.production
- env.production
- ../data/hedgedoc/env.secrets
environment:
#- CMD_CSP_ENABLE=false
- CMD_DB_URL=postgres://hedgedoc:password@database:5432/hedgedoc
- CMD_PROTOCOL_USESSL=true
- CMD_ALLOW_ANONYMOUS=false # anonymous user's can't create notes
- CMD_ALLOW_ANONYMOUS_EDITS=true # but they can be invited to edit notes
- CMD_ALLOW_FREEURL=true # users can create arbitrary names
- CMD_EMAIL=false # only oauth logins
# DOMAIN and OAUTH2 variables are now in env.secret
volumes:
- ../data/hedgedoc/uploads:/hedgedoc/public/uploads
ports:
- "3000:3000"
restart: always
depends_on:
- database

@ -1,2 +0,0 @@
CMD_OAUTH2_CLIENT_SECRET=abcdef1234
CMD_SESSION_SECRET=abcdef1234

@ -0,0 +1,50 @@
#!/bin/bash -x
# Setup the hedgedoc client connection
# this might fail; we'll ignore it if we have already created it
# https://github.com/hedgedoc/hedgedoc/issues/56
kcadm.sh \
create client-scopes \
-r "$REALM" \
-f - <<EOF || echo "whatever"
{
"name": "id",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "true",
"display.on.consent.screen": "true"
},
"protocolMappers": [
{
"name": "id",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-property-mapper",
"consentRequired": false,
"config": {
"user.attribute": "id",
"id.token.claim": "true",
"access.token.claim": "true",
"jsonType.label": "String",
"userinfo.token.claim": "true"
}
}
]
}
EOF
client-create hedgedoc "$HEDGEDOC_HOSTNAME.$DOMAIN_NAME" "$HEDGEDOC_CLIENT_SECRET" <<EOF
,"defaultClientScopes": [
"web-origins",
"acr",
"profile",
"roles",
"id",
"email"
],
"optionalClientScopes": [
"address",
"phone",
"offline_access",
"microprofile-jwt"
]
EOF
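The extra `id` client scope is the workaround for the HedgeDoc issue linked above, and the create is allowed to fail harmlessly when the scope already exists from an earlier run. It can be verified afterwards from the same setup container (a sketch, assuming `kcadm.sh` is already logged in, as it is when this script runs under `keycloak-setup`):

```
# confirm the scope and the hedgedoc client made it into the realm
kcadm.sh get client-scopes -r "$REALM" --fields name | grep -q '"id"' && echo "id scope present"
kcadm.sh get clients -r "$REALM" -q clientId=hedgedoc --fields clientId,defaultClientScopes
```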

@@ -4,7 +4,7 @@ map $http_upgrade $connection_upgrade {
 }

 server {
-    server_name ${HEDGEDOC_HOSTNAME};
+    server_name ${HEDGEDOC_HOSTNAME} ${HEDGEDOC_HOSTNAME}.${DOMAIN_NAME};
     client_max_body_size 128m;
     sendfile on;
@@ -28,7 +28,7 @@ server {
     chunked_transfer_encoding on;

     location / {
-        proxy_pass http://host.docker.internal:3000;
+        proxy_pass http://hedgedoc:3000;
         proxy_set_header Host $host;
         proxy_set_header X-Real-IP $remote_addr;
         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -38,7 +38,7 @@ server {
     # allow the fonts to be used by anything
     location ~* \.(eot|otf|ttf|woff|woff2)$ {
         add_header Access-Control-Allow-Origin *;
-        proxy_pass http://host.docker.internal:3000;
+        proxy_pass http://hedgedoc:3000;
         proxy_set_header Host $host;
         proxy_set_header X-Real-IP $remote_addr;
         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -47,7 +47,7 @@ server {
     # websocket traffic with extra headers for upgrading the connection
     location /socket.io/ {
-        proxy_pass http://host.docker.internal:3000;
+        proxy_pass http://hedgedoc:3000;
         proxy_set_header Host $host;
         proxy_set_header X-Real-IP $remote_addr;
         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

@ -1,69 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }
DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top levle env?"
source env.production || die "no local env?"
DATA="../data/hedgedoc"
SECRETS="$DATA/env.secrets"
if [ -r "$SECRETS" ]; then
docker-compose up -d || die "hedgedoc: unable to start"
exit 0
fi
docker-compose down 2>/dev/null
# regenerate the client secrets
CLIENT_SECRET="$(openssl rand -hex 20)"
SESSION_SECRET="$(openssl rand -hex 20)"
mkdir -p "$DATA/uploads"
chmod 666 "$DATA/uploads"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
CMD_OAUTH2_CLIENT_SECRET=$CLIENT_SECRET
CMD_SESSION_SECRET=$SESSION_SECRET
CMD_DOMAIN=${HEDGEDOC_HOSTNAME}
CMD_OAUTH2_AUTHORIZATION_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/auth
CMD_OAUTH2_TOKEN_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/token
CMD_OAUTH2_USER_PROFILE_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/userinfo
CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR=preferred_username
CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR=name
CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR=email
CMD_OAUTH2_CLIENT_ID=hedgedoc
CMD_OAUTH2_PROVIDERNAME=Keycloak
EOF
../keycloak/client-delete hedgedoc
../keycloak/client-create <<EOF || die "unable to create hedgedoc client"
{
"clientId": "hedgedoc",
"rootUrl": "https://$HEDGEDOC_HOSTNAME",
"adminUrl": "https://$HEDGEDOC_HOSTNAME",
"redirectUris": [ "https://$HEDGEDOC_HOSTNAME/*" ],
"webOrigins": [ "https://$HEDGEDOC_HOSTNAME" ],
"clientAuthenticatorType": "client-secret",
"secret": "$CLIENT_SECRET",
"defaultClientScopes": [
"web-origins",
"acr",
"profile",
"roles",
"id",
"email"
],
"optionalClientScopes": [
"address",
"phone",
"offline_access",
"microprofile-jwt"
]
}
EOF
docker-compose up -d || die "hedgedoc: unable to start container"

@ -0,0 +1,63 @@
version: '3.9'
services:
keycloak-db:
image: mysql:5.7
restart: always
container_name: keycloak-db
volumes:
- ./data/keycloak/database:/var/lib/mysql
environment:
MYSQL_ROOT_PASSWORD: root
MYSQL_DATABASE: keycloak
MYSQL_USER: keycloak
MYSQL_PASSWORD: password
keycloak:
image: quay.io/keycloak/keycloak:18.0.0
restart: always
container_name: keycloak
entrypoint: /opt/keycloak/bin/kc.sh start --hostname="$${KEYCLOAK_HOSTNAME}.$${DOMAIN_NAME}" --proxy=edge
# healthcheck:
# test: ["CMD", "curl", "-f", "http://localhost:8080"]
# interval: 30s
# timeout: 10s
# retries: 3
user: "0:0" # otherwise the persistent data directory is not writable
env_file:
- env.production
- data/keycloak/secrets
environment:
DB_VENDOR: MYSQL
DB_ADDR: keycloak-db
DB_DATABASE: keycloak
DB_USER: keycloak
DB_PASSWORD: password
KEYCLOAK_ADMIN: admin
KEYCLOAK_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD}
PROXY_ADDRESS_FORWARDING: 'true'
# KEYCLOAK_ADMIN_PASSWORD is set in env.secrets
volumes:
- ./data/keycloak/certs:/etc/x509/https
- ./data/keycloak/keycloak:/opt/keycloak/data
depends_on:
- keycloak-db
keycloak-setup:
image: quay.io/keycloak/keycloak:18.0.0
profiles:
- setup
depends_on:
- keycloak
restart: "no"
env_file:
- env.production
- data/keycloak/secrets
entrypoint: /keycloak-setup.sh
volumes:
- ./keycloak/setup:/keycloak-setup.sh:ro
- ./keycloak/client-create:/bin/client-create:ro
# add the keycloak nginx configuration into the nginx volume
nginx:
volumes:
- ./keycloak/nginx.conf:/etc/nginx/templates/keycloak.conf.template:ro
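Because `keycloak-setup` is attached to the `setup` profile, a plain `up` never starts it; it is run on demand as a one-shot container once Keycloak itself is up. The other service overlays mount their own `keycloak.sh` into `/keycloak-setup/`, which is why it is normally invoked through the Makefile wrapper that merges every module's yaml file:

```
# one-shot realm/client provisioning (a sketch; the target is defined in the Makefile above)
make keycloak-setup
```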

@@ -1,7 +1,6 @@
 # Keycloak
 Keycloak is the single-sign-on user authentication provider.
-You must set the `KEYCLOAK_ADMIN_PASSWORD` in the `env.secrets` file.
-This is the most important secret: it allows user accounts to be created
-for all the other services.
+To log in and create the first account, use `admin` and
+the password stored in `data/keycloak/secrets`.

@@ -1,20 +1,34 @@
 #!/bin/bash
 die() { echo >&2 "$@" ; exit 1 ; }
-DIRNAME="$(dirname $0)"
-cd "$DIRNAME"
-source ../env.production || die "no top levle env?"
-source env.production || die "no local env?"
-source "../data/keycloak/env.secrets" || die "no local secrets?"
-docker-compose exec -T keycloak \
-	/opt/keycloak/bin/kcadm.sh \
-	create clients \
-	--server http://localhost:8080/ \
-	--user admin \
-	--realm master \
-	--password "$KEYCLOAK_ADMIN_PASSWORD" \
-	-r "$REALM" \
-	-f - \
-	|| die "create client failed"
+client_name="$1"
+hostname="$2"
+secret="$3"
+
+client_id="$(kcadm.sh get clients \
+	-r "$REALM" \
+	--fields id \
+	-q clientId="$client_name" \
+	--format csv \
+	--noquotes \
+)"
+
+if [ -n "$client_id" ]; then
+	kcadm.sh delete "clients/$client_id" -r "$REALM" || die "$client_id: unable to delete"
+fi
+
+# remember to add a leading , if adding extra data
+extra="$(cat -)"
+
+kcadm.sh create clients -r "$REALM" -f - <<EOF || die "$client_id: unable to create"
+{
+	"clientId": "$client_name",
+	"rootUrl": "https://$hostname",
+	"adminUrl": "https://$hostname",
+	"redirectUris": [ "https://$hostname/*" ],
+	"webOrigins": [ "https://$hostname" ],
+	"clientAuthenticatorType": "client-secret",
+	"secret": "$secret"
+	$extra
+}
+EOF
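The rewritten `client-create` takes the client id, hostname, and client secret as arguments and reads optional extra JSON (note the leading comma) from stdin, which is how the per-service `keycloak.sh` scripts call it. A minimal usage sketch with hypothetical values:

```
# no extra fields: redirect stdin from /dev/null
client-create grafana dashboard.example.com "$GRAFANA_CLIENT_SECRET" </dev/null

# with extra fields appended to the client definition
client-create hedgedoc docs.example.com "$HEDGEDOC_CLIENT_SECRET" <<EOF
,"defaultClientScopes": [ "web-origins", "profile", "roles", "id", "email" ]
EOF
```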

@ -1,40 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }
DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top levle env?"
source env.production || die "no local env?"
source "../data/keycloak/env.secrets" || die "no local secrets?"
# try to get the clients by name
CLIENT_NAME="$1"
if [ -z "$CLIENT_NAME" ]; then
die "usage: $0 clientName"
fi
CLIENT_ID="$(docker-compose exec -T keycloak \
/opt/keycloak/bin/kcadm.sh \
get clients \
--server http://localhost:8080/ \
--user admin \
--password "$KEYCLOAK_ADMIN_PASSWORD" \
--realm master \
-r "$REALM" \
| jq -r ".[] | select( .clientId == \"$CLIENT_NAME\" ).id")"
if [ -z "$CLIENT_ID" ]; then
die "$CLIENT_NAME: no such client"
fi
echo "$0: $CLIENT_NAME = $CLIENT_ID"
docker-compose exec -T keycloak \
/opt/keycloak/bin/kcadm.sh \
delete "clients/$CLIENT_ID" \
--server http://localhost:8080/ \
--user admin \
--realm master \
--password "$KEYCLOAK_ADMIN_PASSWORD" \
-r "$REALM" \
|| die "$CLIENT_NAME($CLIENT_ID): unable to remove"

@ -1,43 +0,0 @@
version: '3'
volumes:
mysql_data:
driver: local
services:
mysql:
image: mysql:5.7
restart: always
volumes:
- ../data/keycloak/database:/var/lib/mysql
environment:
MYSQL_ROOT_PASSWORD: root
MYSQL_DATABASE: keycloak
MYSQL_USER: keycloak
MYSQL_PASSWORD: password
keycloak:
image: quay.io/keycloak/keycloak:18.0.0
restart: always
entrypoint: /opt/keycloak/bin/kc.sh start --hostname="$${KEYCLOAK_HOSTNAME}" --proxy=edge
user: "0:0" # otherwise the persistent data directory is not writable
env_file:
- ../env.production
- env.production
- ../data/keycloak/env.secrets
environment:
DB_VENDOR: MYSQL
DB_ADDR: mysql
DB_DATABASE: keycloak
DB_USER: keycloak
DB_PASSWORD: password
KEYCLOAK_ADMIN: admin
# KEYCLOAK_ADMIN_PASSWORD should be set in env.secrets
PROXY_ADDRESS_FORWARDING: 'true'
volumes:
- ../data/keycloak/certs:/etc/x509/https
- ../data/keycloak/keycloak:/opt/keycloak/data
ports:
- 8080:8080
depends_on:
- mysql

@@ -1,9 +1,9 @@
 server {
-    server_name login.${DOMAIN_NAME};
+    server_name ${KEYCLOAK_HOSTNAME} ${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME};
     client_max_body_size 128m;

     location / {
-        proxy_pass http://host.docker.internal:8080;
+        proxy_pass http://keycloak:8080;
         proxy_pass_header Set-Cookie;
         proxy_set_header Host $host;
         proxy_set_header X-Forwarded-For $remote_addr;

@ -1,119 +0,0 @@
#!/bin/bash
die() { echo >&2 "keycloak: ERROR: $@" ; exit 1 ; }
info() { echo >&2 "keycloak: $@" ; }
DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production
source ./env.production
source "../env.smtp" 2>/dev/null
SECRETS="../data/keycloak/env.secrets"
if [ -r "$SECRETS" ]; then
docker-compose up -d || die "keycloak: unable to start container"
exit 0
fi
docker-compose down 2>/dev/null
KEYCLOAK_ADMIN_PASSWORD="$(openssl rand -hex 8)"
echo "Keycloak admin password $KEYCLOAK_ADMIN_PASSWORD"
mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
KEYCLOAK_ADMIN_PASSWORD=$KEYCLOAK_ADMIN_PASSWORD
EOF
docker-compose up -d || die "unable to start keycloak"
echo "sleeping a minute while keycloak initializes..."
sleep 30
info "logging into server"
docker-compose exec keycloak \
/opt/keycloak/bin/kcadm.sh \
config credentials \
--server http://localhost:8080/ \
--user admin \
--password "$KEYCLOAK_ADMIN_PASSWORD" \
--realm master \
|| die "unable to login"
info "Create a new realm for '$REALM'"
docker-compose exec keycloak \
/opt/keycloak/bin/kcadm.sh \
create realms \
-s "realm=$REALM" \
-s enabled=true \
|| die "unable to create realm"
# https://github.com/hedgedoc/hedgedoc/issues/56
info "Fix up a id bug"
docker-compose exec -T keycloak \
/opt/keycloak/bin/kcadm.sh \
create client-scopes \
-r "$REALM" \
-f - <<EOF || die "unable to create mapping"
{
"name": "id",
"protocol": "openid-connect",
"attributes": {
"include.in.token.scope": "true",
"display.on.consent.screen": "true"
},
"protocolMappers": [
{
"name": "id",
"protocol": "openid-connect",
"protocolMapper": "oidc-usermodel-property-mapper",
"consentRequired": false,
"config": {
"user.attribute": "id",
"id.token.claim": "true",
"access.token.claim": "true",
"jsonType.label": "String",
"userinfo.token.claim": "true"
}
}
]
}
EOF
if [ -n "$SMTP_SERVER" ]; then
info "configuring email"
docker-compose exec -T keycloak \
/opt/keycloak/bin/kcadm.sh update "realms/$REALM" \
-f - <<EOF || die "unable to configure email"
{
"resetPasswordAllowed": "true",
"smtpServer" : {
"auth" : "true",
"starttls" : "true",
"user" : "$SMTP_USER",
"password" : "$SMTP_PASSWORD",
"port" : "$SMTP_PORT",
"host" : "$SMTP_SERVER",
"from" : "keycloak@$DOMAIN_NAME",
"fromDisplayName" : "Keycloak @ $DOMAIN_NAME",
"ssl" : "false"
}
}
EOF
fi
info "Create an admin user in realm"
docker-compose exec -T keycloak \
/opt/keycloak/bin/kcadm.sh \
create users \
-o \
--fields id,username \
-r "$REALM" \
-s username=admin \
-s enabled=true \
-s 'credentials=[{"type":"'$KEYCLOAK_ADMIN_PASSWORD'","value":"admin","temporary":false}]' \
|| die "$REALM: unable to create admin user"

@ -0,0 +1,198 @@
version: '3'
services:
mastodon-db:
image: postgres:13.4-alpine
restart: always
container_name: mastodon-db
#shm_size: 256mb
# networks:
# - internal_network
healthcheck:
test: ['CMD', 'pg_isready', '-U', "mastodon", "-d", "mastodon_production"]
volumes:
- ./data/mastodon/database:/var/lib/postgresql/data
environment:
- POSTGRES_USER=mastodon
- POSTGRES_PASSWORD=mastodon
#- POSTGRES_DB=mastodon_production
env_file:
- mastodon/env.production
mastodon-redis:
image: redis:6-alpine
restart: always
container_name: mastodon-redis
# networks:
# - internal_network
healthcheck:
test: ['CMD', 'redis-cli', 'ping']
env_file:
- mastodon/env.production
volumes:
- ./data/mastodon/redis:/data
mastodon-es:
image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2
restart: always
container_name: mastodon-es
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- "cluster.name=es-mastodon"
- "discovery.type=single-node"
- "bootstrap.memory_lock=true"
env_file:
- mastodon/env.production
# networks:
# - internal_network
healthcheck:
test: ["CMD-SHELL", "curl --silent --fail localhost:9200/_cluster/health || exit 1"]
volumes:
- ./data/mastodon/elasticsearch:/usr/share/elasticsearch/data
# fixup the permissions on the data directory since they are created as root on host
entrypoint: ["/bin/sh", "-c", "chown -R elasticsearch:elasticsearch data && exec /usr/local/bin/docker-entrypoint.sh eswrapper"]
ulimits:
memlock:
soft: -1
hard: -1
mastodon:
image: tootsuite/mastodon
container_name: mastodon
restart: always
#command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 6001"
user: "0:0"
command: ["/entrypoint.sh"]
# networks:
# - external_network
# - internal_network
healthcheck:
# prettier-ignore
test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:6001/health || exit 1']
# ports:
#- '6001:6001'
depends_on:
- mastodon-db
- mastodon-redis
- mastodon-es
volumes:
- ./data/mastodon/system:/mastodon/public/system
- ./mastodon/entrypoint.sh:/entrypoint.sh:ro
env_file:
- mastodon/env.production
environment:
- WEB_DOMAIN=$MASTODON_HOSTNAME.$DOMAIN_NAME
- LOCAL_DOMAIN=$DOMAIN_NAME
- OIDC_DISPLAY_NAME=$REALM
- OIDC_ISSUER=https://$KEYCLOAK_HOSTNAME.$DOMAIN_NAME/realms/$REALM
- OIDC_REDIRECT_URI=https://$MASTODON_HOSTNAME.$DOMAIN_NAME/auth/auth/openid_connect/callback
- OIDC_CLIENT_SECRET=${MASTODON_CLIENT_SECRET}
- SECRET_KEY_BASE=${MASTODON_ADMIN_PASSWORD}
- OTP_SECRET=${MASTODON_SESSION_SECRET}
- SMTP_SERVER=$SMTP_SERVER
- SMTP_PORT=$SMTP_PORT
- SMTP_LOGIN=$SMTP_USER
- SMTP_PASSWORD=$SMTP_PASSWORD
- SMTP_FROM_ADDRESS=mastodon@$DOMAIN_NAME
mastodon-streaming:
image: tootsuite/mastodon
restart: always
container_name: mastodon-streaming
environment:
- WEB_DOMAIN=$MASTODON_HOSTNAME.$DOMAIN_NAME
- LOCAL_DOMAIN=$DOMAIN_NAME
- OIDC_DISPLAY_NAME=$REALM
- OIDC_ISSUER=https://$KEYCLOAK_HOSTNAME.$DOMAIN_NAME/realms/$REALM
- OIDC_REDIRECT_URI=https://$MASTODON_HOSTNAME.$DOMAIN_NAME/auth/auth/openid_connect/callback
- OIDC_CLIENT_SECRET=${MASTODON_CLIENT_SECRET}
- SECRET_KEY_BASE=${MASTODON_ADMIN_PASSWORD}
- OTP_SECRET=${MASTODON_SESSION_SECRET}
- SMTP_SERVER=$SMTP_SERVER
- SMTP_PORT=$SMTP_PORT
- SMTP_LOGIN=$SMTP_USER
- SMTP_PASSWORD=$SMTP_PASSWORD
- SMTP_FROM_ADDRESS=mastodon@$DOMAIN_NAME
env_file:
- mastodon/env.production
command: node ./streaming
# networks:
# - external_network
# - internal_network
volumes:
- ./data/mastodon/system:/mastodon/public/system
healthcheck:
# prettier-ignore
test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:4000/api/v1/streaming/health || exit 1']
depends_on:
- mastodon-db
- mastodon-redis
mastodon-sidekiq:
image: tootsuite/mastodon
restart: always
container_name: mastodon-sidekiq
env_file:
- mastodon/env.production
environment:
- WEB_DOMAIN=$MASTODON_HOSTNAME.$DOMAIN_NAME
- LOCAL_DOMAIN=$DOMAIN_NAME
- OIDC_DISPLAY_NAME=$REALM
- OIDC_ISSUER=https://$KEYCLOAK_HOSTNAME.$DOMAIN_NAME/realms/$REALM
- OIDC_REDIRECT_URI=https://$MASTODON_HOSTNAME.$DOMAIN_NAME/auth/auth/openid_connect/callback
- OIDC_CLIENT_SECRET=${MASTODON_CLIENT_SECRET}
- SECRET_KEY_BASE=${MASTODON_ADMIN_PASSWORD}
- OTP_SECRET=${MASTODON_SESSION_SECRET}
- SMTP_SERVER=$SMTP_SERVER
- SMTP_PORT=$SMTP_PORT
- SMTP_LOGIN=$SMTP_USER
- SMTP_PASSWORD=$SMTP_PASSWORD
- SMTP_FROM_ADDRESS=mastodon@$DOMAIN_NAME
command: bundle exec sidekiq
depends_on:
- mastodon-db
- mastodon-redis
# networks:
# - external_network
# - internal_network
volumes:
- ./data/mastodon/system:/mastodon/public/system
healthcheck:
test: ['CMD-SHELL', "ps aux | grep '[s]idekiq\ 6' || false"]
## Uncomment to enable federation with tor instances along with adding the following ENV variables
## http_proxy=http://privoxy:8118
## ALLOW_ACCESS_TO_HIDDEN_SERVICE=true
# tor:
# image: sirboops/tor
# networks:
# - external_network
# - internal_network
#
# privoxy:
# image: sirboops/privoxy
# volumes:
# - ./priv-config:/opt/config
# networks:
# - external_network
# - internal_network
# add the subdomain nginx configuration into the nginx volume
# as well as the cache directory so that nginx can send files directly from it
nginx:
volumes:
- ./mastodon/nginx.conf:/etc/nginx/templates/mastodon.conf.template:ro
- ./data/mastodon/system/cache:/mastodon/system/cache:ro
- ./data/mastodon/system/media_attachments:/mastodon/system/media_attachments:ro
- ./data/mastodon/system/accounts:/mastodon/system/accounts:ro
# add the subdomain client secrets to the keycloak-setup volume
keycloak-setup:
env_file:
- data/mastodon/secrets
volumes:
- ./mastodon/keycloak.sh:/keycloak-setup/mastodon.sh:ro
#networks:
# external_network:
# internal_network:
# internal: true

@@ -1,4 +1,5 @@
 # Mastodon
 This is the vanilla version with Elastic Search and Single-Sign-On enabled.
-No other user accounts are allowed to join.
+No other user accounts are allowed to join - you must use the Keycloak
+server to create accounts and login.

@ -1,131 +0,0 @@
version: '3'
services:
database:
image: postgres:13.4-alpine
restart: always
#shm_size: 256mb
networks:
- internal_network
healthcheck:
test: ['CMD', 'pg_isready', '-U', "mastodon", "-d", "mastodon_production"]
volumes:
- ../data/mastodon/database:/var/lib/postgresql/data
environment:
- POSTGRES_USER=mastodon
- POSTGRES_PASSWORD=mastodon
#- POSTGRES_DB=mastodon_production
redis:
image: redis:6-alpine
restart: always
networks:
- internal_network
healthcheck:
test: ['CMD', 'redis-cli', 'ping']
volumes:
- ../data/mastodon/redis:/data
es:
image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2
restart: always
environment:
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- "cluster.name=es-mastodon"
- "discovery.type=single-node"
- "bootstrap.memory_lock=true"
networks:
- internal_network
healthcheck:
test: ["CMD-SHELL", "curl --silent --fail localhost:9200/_cluster/health || exit 1"]
volumes:
- ../data/mastodon/elasticsearch:/usr/share/elasticsearch/data
# fixup the permissions on the data directory since they are created as root on host
entrypoint: /bin/sh -c "chown -R elasticsearch:elasticsearch data && /usr/local/bin/docker-entrypoint.sh eswrapper"
ulimits:
memlock:
soft: -1
hard: -1
mastodon:
image: tootsuite/mastodon
restart: always
env_file:
- ../env.production
- env.production
- ../data/mastodon/env.secrets
command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 6001"
networks:
- external_network
- internal_network
healthcheck:
# prettier-ignore
test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:6001/health || exit 1']
ports:
- '6001:6001'
depends_on:
- database
- redis
- es
volumes:
- ../data/mastodon/system:/mastodon/public/system
streaming:
image: tootsuite/mastodon
restart: always
env_file:
- ../env.production
- env.production
- ../data/mastodon/env.secrets
command: node ./streaming
networks:
- external_network
- internal_network
healthcheck:
# prettier-ignore
test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:4000/api/v1/streaming/health || exit 1']
ports:
- '4000:4000'
depends_on:
- database
- redis
sidekiq:
image: tootsuite/mastodon
restart: always
env_file:
- ../env.production
- env.production
- ../data/mastodon/env.secrets
command: bundle exec sidekiq
depends_on:
- database
- redis
networks:
- external_network
- internal_network
volumes:
- ../data/mastodon/system:/mastodon/public/system
healthcheck:
test: ['CMD-SHELL', "ps aux | grep '[s]idekiq\ 6' || false"]
## Uncomment to enable federation with tor instances along with adding the following ENV variables
## http_proxy=http://privoxy:8118
## ALLOW_ACCESS_TO_HIDDEN_SERVICE=true
# tor:
# image: sirboops/tor
# networks:
# - external_network
# - internal_network
#
# privoxy:
# image: sirboops/privoxy
# volumes:
# - ./priv-config:/opt/config
# networks:
# - external_network
# - internal_network
networks:
external_network:
internal_network:
internal: true

@ -0,0 +1,38 @@
#!/bin/bash -x
id
export
pwd
rm -f /mastodon/tmp/pids/server.pid
export MASTODON_DIR=/mastodon/public/system
export VAPID_KEY="$MASTODON_DIR/vapid_key"
export DB_SETUP="$MASTODON_DIR/db_done"
which rails
chown -R mastodon:mastodon "$MASTODON_DIR"
#exec su mastodon /bin/bash - <<EOF
exec su mastodon <<EOF
export PATH="$PATH:/opt/ruby/bin:/opt/node/bin:/opt/mastodon/bin"
if [ ! -r "$VAPID_KEY" ]; then
rails mastodon:webpush:generate_vapid_key > "$VAPID_KEY" \
|| exit 1
fi
. "$VAPID_KEY"
if [ ! -r "$DB_SETUP" ]; then
rails db:setup \
|| exit 1
touch "$DB_SETUP"
fi
exec bundle exec rails s -p 6001
EOF
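The entrypoint persists the web-push keys by capturing the output of the vapid key rake task, which prints shell-style assignments, and then sources that file on every start; database setup is similarly guarded by a marker file so it only runs once. A sketch of what the persisted key file is expected to contain (hypothetical values):

```
# data/mastodon/system/vapid_key, generated once by
# `rails mastodon:webpush:generate_vapid_key` and sourced by the entrypoint
VAPID_PRIVATE_KEY=hypothetical-private-key-base64
VAPID_PUBLIC_KEY=hypothetical-public-key-base64
```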

@@ -19,12 +19,12 @@
 # Redis
 # -----
-REDIS_HOST=redis
+REDIS_HOST=mastodon-redis
 REDIS_PORT=6379

 # PostgreSQL
 # ----------
-DB_HOST=database
+DB_HOST=mastodon-db
 DB_USER=mastodon
 DB_NAME=mastodon_production
 DB_PASS=mastodon
@@ -33,7 +33,7 @@ DB_PORT=5432
 # Elasticsearch (optional)
 # ------------------------
 ES_ENABLED=true
-ES_HOST=es
+ES_HOST=mastodon-es
 ES_PORT=9200

 # Authentication for ES (optional)
 ES_USER=elastic
@@ -67,6 +67,9 @@ ES_PASS=password
 #AWS_SECRET_ACCESS_KEY=
 #S3_ALIAS_HOST=files.example.com

+# Do not use sendfile since this is fronted by nginx
+RAILS_SERVE_STATIC_FILES=false
+
 # do not allow normal logins
 OMNIAUTH_ONLY=true

@ -0,0 +1,3 @@
#!/bin/bash -x
client-create mastodon "$MASTODON_HOSTNAME.$DOMAIN_NAME" "$MASTODON_CLIENT_SECRET" </dev/null

@ -0,0 +1,154 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
upstream mastodon-backend {
server mastodon:6001 fail_timeout=0;
}
upstream mastodon-streaming {
server mastodon-streaming:4000 fail_timeout=0;
}
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=CACHE:10m inactive=7d max_size=1g;
server {
listen 443 ssl http2;
server_name ${MASTODON_HOSTNAME} ${MASTODON_HOSTNAME}.${DOMAIN_NAME};
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!MEDIUM:!LOW:!aNULL:!NULL:!SHA;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_tickets off;
include /etc/nginx/includes/challenge.conf;
# Uncomment these lines once you acquire a certificate:
# ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
# ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
keepalive_timeout 70;
sendfile on;
client_max_body_size 80m;
root /mastodon;
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript image/svg+xml image/x-icon;
location / {
try_files $uri @proxy;
}
# If Docker is used for deployment and Rails serves static files,
# then you must replace the line `try_files $uri =404;` with `try_files $uri @proxy;`.
location = /sw.js {
add_header Cache-Control "public, max-age=604800, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
#try_files $uri =404;
try_files $uri @proxy;
}
location ~ ^/assets/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/avatars/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/emoji/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/headers/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/packs/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/shortcuts/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/sounds/ {
add_header Cache-Control "public, max-age=2419200, must-revalidate";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ~ ^/system/ {
add_header Cache-Control "public, max-age=2419200, immutable";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
try_files $uri @proxy;
}
location ^~ /api/v1/streaming {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Proxy "";
proxy_pass http://mastodon-streaming;
proxy_buffering off;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
tcp_nodelay on;
}
location @proxy {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Proxy "";
proxy_pass_header Server;
proxy_pass http://mastodon-backend;
proxy_buffering on;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_cache CACHE;
proxy_cache_valid 200 7d;
proxy_cache_valid 410 24h;
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
add_header X-Cached $upstream_cache_status;
tcp_nodelay on;
}
error_page 404 500 501 502 503 504 /500.html;
}

@ -1,78 +0,0 @@
#!/bin/bash
die() { echo >&2 "ERROR: $@" ; exit 1 ; }
info() { echo >&2 "$@" ; }
DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production
source ./env.production
source "../env.smtp" 2>/dev/null
mkdir -p ../data/mastodon/system
chmod 777 ../data/mastodon/system
SECRETS="../data/mastodon/env.secrets"
if [ -r "$SECRETS" ]; then
docker-compose up -d || die "unable to restart mastodon"
exit 0
fi
# have to bring it all down before we touch the files
docker-compose down
OIDC_CLIENT_SECRET="$(openssl rand -hex 32)"
# create the secrets file,
# along with some parameters that should be in the environment
mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
WEB_DOMAIN=$MASTODON_HOSTNAME
LOCAL_DOMAIN=$DOMAIN_NAME
OIDC_DISPLAY_NAME=$REALM
OIDC_ISSUER=https://$KEYCLOAK_HOSTNAME/realms/$REALM
OIDC_REDIRECT_URI=https://$MASTODON_HOSTNAME/auth/auth/openid_connect/callback
OIDC_CLIENT_SECRET=$OIDC_CLIENT_SECRET
SECRET_KEY_BASE=$(openssl rand -hex 32)
OTP_SECRET=$(openssl rand -hex 32)
EOF
if [ -n "$SMTP_SERVER" ]; then
cat <<EOF >> "$SECRETS"
SMTP_SERVER=$SMTP_SERVER
SMTP_PORT=$SMTP_PORT
SMTP_LOGIN=$SMTP_USER
SMTP_PASSWORD=$SMTP_PASSWORD
SMTP_FROM_ADDRESS=mastodon@$DOMAIN_NAME
EOF
fi
info "mastodon: creating push keys"
docker-compose run --rm mastodon \
rails mastodon:webpush:generate_vapid_key \
>> "$SECRETS" \
|| die "unable to generate vapid key"
info "mastodon: setting up database"
docker-compose run --rm mastodon \
rails db:setup \
|| die "unable to login"
source "$SECRETS"
info "mastodon: creating keycloak interface"
../keycloak/client-delete mastodon
../keycloak/client-create <<EOF || die "Unable to create keycloak client"
{
"clientId": "mastodon",
"rootUrl": "https://$MASTODON_HOSTNAME/",
"adminUrl": "https://$MASTODON_HOSTNAME/",
"redirectUris": [ "https://$MASTODON_HOSTNAME/*" ],
"webOrigins": [ "https://$MASTODON_HOSTNAME" ],
"clientAuthenticatorType": "client-secret",
"secret": "$OIDC_CLIENT_SECRET"
}
EOF
docker-compose up -d || die "mastodon: unable to start container"

@ -0,0 +1,30 @@
version: '3.9'
services:
nginx:
# image: nginx:1.21-alpine
build:
context: nginx
dockerfile: Dockerfile
restart: always
#entrypoint: /bin/sh
container_name: nginx
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx/etc/includes:/etc/nginx/includes:ro
- ./nginx/etc/nginx.conf:/etc/nginx/nginx.conf:ro
- ./nginx/default.conf:/etc/nginx/templates/default.conf.template:ro
- ./html:/var/www/html:ro
- ./data/nginx/certbot/www:/var/www/certbot:ro
- ./data/nginx/certbot/conf:/etc/letsencrypt:rw
- /home:/home:ro
env_file:
- env.production
certbot:
image: certbot/certbot
container_name: certbot
volumes:
- ./data/nginx/certbot/conf:/etc/letsencrypt
- ./data/nginx/certbot/www:/var/www/certbot

@ -0,0 +1,30 @@
FROM alpine
RUN apk update
RUN echo "building" \
&& apk add \
nginx \
collectd \
collectd-nginx \
nginx-mod-http-vts \
gettext \
curl \
openssl \
&& mkdir -p \
/etc/nginx/modules-enabled \
/etc/nginx/conf.d \
/docker-entrypoint.d \
&& ln -sf /etc/nginx/modules/10_http_vts.conf /etc/nginx/modules-enabled \
# forward request and error logs to docker log collector
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log \
&& echo "Done"
COPY ["docker-entrypoint.d/*", "/docker-entrypoint.d/" ]
COPY ["docker-entrypoint.sh", "/" ]
ENTRYPOINT ["/docker-entrypoint.sh"]
EXPOSE 80
STOPSIGNAL SIGQUIT
CMD ["nginx", "-g", "daemon off;"]
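This image replaces the stock `nginx:1.21-alpine` so that the vts module, collectd, and the certificate bootstrap scripts are baked in. A sketch of building and sanity-checking it through the Makefile wrapper:

```
# build only the nginx service image from this Dockerfile
make nginx-build

# confirm the vts module symlink exists and the rendered config parses
docker-compose exec nginx ls /etc/nginx/modules-enabled/
docker-compose exec nginx nginx -t
```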

@ -1,34 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }
DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production
source ./env.production
domain_args="-d $DOMAIN_NAME,$KEYCLOAK_HOSTNAME,$HEDGEDOC_HOSTNAME,$MASTODON_HOSTNAME,$NEXTCLOUD_HOSTNAME,$GRAFANA_HOSTNAME,$MATRIX_HOSTNAME,$GITEA_HOSTNAME,$MOBILIZON_HOSTNAME,$PIXELFED_HOSTNAME"
rsa_key_size=2048
set -x
# move the temp live directory away if
# this is the first time we've run anything here
if [ ! -d "../data/certbot/conf/accounts" ]; then
echo "deleting temp keys"
rm -rf ../data/certbot/conf/live
fi
docker-compose run --rm certbot \
certonly \
--webroot \
--webroot-path /var/www/certbot \
--email "admin@$DOMAIN_NAME" \
--rsa-key-size "$rsa_key_size" \
--agree-tos \
--no-eff-email \
--force-renewal \
$domain_args \
|| die "unable to renew!"
docker-compose exec nginx nginx -s reload

@@ -1,6 +1,30 @@
-# Redirect *all* port 80 traffic to the same thing on port 443
+vhost_traffic_status_zone;
+
 server {
     listen 80 default_server;
+
+    # this works on the docker container with http_stub built in
+    # only allow from localhost
+    location /nginx_status {
+        stub_status on;
+        access_log off;
+        allow 127.0.0.1;
+        deny all;
+    }
+
+    # this works with the vts module
+    location /status {
+        vhost_traffic_status_display;
+        vhost_traffic_status_display_format html;
+        access_log off;
+        #allow 127.0.0.1;
+        #deny all;
+    }
+
+    # forward certbot challenges to the certbot directory
+    include /etc/nginx/includes/challenge.conf;
+
+    # Redirect *all other* port 80 traffic to the same thing on port 443
     location / {
         return 301 https://$host$request_uri;
     }
@@ -27,13 +51,16 @@ server {
     chunked_transfer_encoding on;

     # delegated Matrix server
-    location /.well-known/matrix {
-        proxy_pass https://${MATRIX_HOSTNAME};
-    }
+    # location /.well-known/matrix {
+    #     proxy_pass https://${MATRIX_HOSTNAME}.${DOMAIN_NAME};
+    # }

     # separate Mastodon WEB_DOMAIN and LOCAL_DOMAIN
     location = /.well-known/host-meta {
-        return 302 https://${MASTODON_HOSTNAME}$request_uri;
+        return 302 https://${MASTODON_HOSTNAME}.${DOMAIN_NAME}$request_uri;
+    }
+
+    location = /.well-known/webfinger {
+        return 302 https://${MASTODON_HOSTNAME}.${DOMAIN_NAME}$request_uri;
     }

     # tilde club home directories
@@ -66,7 +93,7 @@ server {
     proxy_hide_header Content-Security-Policy;
     add_header Content-Security-Policy "script-src 'self' 'unsafe-inline' 'unsafe-eval' *.${DOMAIN_NAME}; frame-src 'self' *.${DOMAIN_NAME}; object-src 'self'; base-uri 'self' *.${DOMAIN_NAME}";
-    proxy_pass http://host.docker.internal:3000/s$request_uri;
+    proxy_pass http://hedgedoc:3000/s$request_uri;
     proxy_cache_valid any 1m;
 }
@@ -77,7 +104,7 @@ server {
     proxy_ignore_headers Cache-Control;
     proxy_cache_valid any 1m;
-    proxy_pass http://host.docker.internal:3000$request_uri;
+    proxy_pass http://hedgedoc:3000$request_uri;
 }

 listen 443 ssl default_server;
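The two status locations are what the new Prometheus scrape configuration reads. A quick check from inside the nginx container (a sketch, assuming the custom image above, which installs curl and the vts module):

```
# stub_status, restricted to localhost inside the container
docker-compose exec nginx curl -s http://localhost/nginx_status

# vts metrics in Prometheus text format, the path used by prometheus/prometheus.yaml
docker-compose exec nginx curl -s http://localhost/status/format/prometheus | head
```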

@ -1,28 +0,0 @@
version: '3'
services:
nginx:
image: nginx:1.21-alpine
restart: always
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- ./nginx/templates:/etc/nginx/templates:ro
- ./nginx/includes:/etc/nginx/includes:ro
- ../html:/var/www/html:ro
- ../data/certbot/www:/var/www/certbot:ro
- ../data/certbot/conf:/etc/letsencrypt:ro
- ../data/nginx/cache:/data/nginx/cache:rw
- /home:/home:ro
env_file:
- ../env.production
- env.production
extra_hosts:
- "host.docker.internal:host-gateway"
certbot:
image: certbot/certbot
volumes:
- ../data/certbot/conf:/etc/letsencrypt
- ../data/certbot/www:/var/www/certbot

@ -0,0 +1,12 @@
#!/bin/sh -x
touch /started
#cat >> /etc/collectd/collectd.conf <<EOF
cat /etc/collectd/collectd.conf - > /tmp/conf <<EOF
LoadPlugin nginx
<Plugin "nginx">
URL "http://localhost:80/nginx_status"
</Plugin>
EOF
#collectd

@ -0,0 +1,31 @@
#!/bin/sh
mkdir -p /data/nginx/cache
if [ -z "$DOMAIN_NAME" ]; then
DOMAIN_NAME="example.com"
fi
certdir="/etc/letsencrypt/live/${DOMAIN_NAME}"
if [ -r "$certdir/fullchain.pem" ]; then
exit 0
fi
mkdir -p "$certdir"
echo >&2 "$certdir: Creating temporary keys"
openssl req \
-x509 \
-newkey rsa:2048 \
-keyout "$certdir/privkey.pem" \
-out "$certdir/fullchain.pem" \
-sha256 \
-nodes \
-days 365 \
-subj "/CN=$DOMAIN_NAME" \
|| exit 1
echo >&2 "$certdir: Generated temporary keys -- certbot needs to request real ones"
exit 0
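Until certbot obtains real certificates, this temporary self-signed pair lets nginx start with its `ssl` listeners intact. A sketch of checking which certificate is currently in place (hypothetical domain), using the openssl already installed in the image:

```
docker-compose exec nginx \
	openssl x509 -in /etc/letsencrypt/live/example.com/fullchain.pem -noout -subject -issuer
# subject=CN = example.com   (self-signed placeholder until `make certbot` replaces it)
```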

@ -0,0 +1,39 @@
#!/bin/sh
set -e
ME=$(basename $0)
entrypoint_log() {
if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
echo "$@"
fi
}
auto_envsubst() {
local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
local filter="${NGINX_ENVSUBST_FILTER:-}"
local template defined_envs relative_path output_path subdir
defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
[ -d "$template_dir" ] || return 0
if [ ! -w "$output_dir" ]; then
entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
return 0
fi
find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
relative_path="${template#$template_dir/}"
output_path="$output_dir/${relative_path%$suffix}"
subdir=$(dirname "$relative_path")
# create a subdirectory where the template file exists
mkdir -p "$output_dir/$subdir"
entrypoint_log "$ME: Running envsubst on $template to $output_path"
envsubst "$defined_envs" < "$template" > "$output_path"
done
}
auto_envsubst
exit 0
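This mirrors the template mechanism of the stock nginx image: every `*.conf.template` under `/etc/nginx/templates` has its `${VAR}` references substituted from the environment and the result lands in `/etc/nginx/conf.d`. A standalone sketch of the substitution step with hypothetical values:

```
echo 'server_name ${KEYCLOAK_HOSTNAME} ${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME};' \
	| KEYCLOAK_HOSTNAME=login DOMAIN_NAME=example.com envsubst
# server_name login login.example.com;
```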

@ -0,0 +1,47 @@
#!/bin/sh
# vim:sw=4:ts=4:et
set -e
entrypoint_log() {
if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
echo "$@"
fi
}
if [ "$1" = "nginx" -o "$1" = "nginx-debug" ]; then
if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"
entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
case "$f" in
*.envsh)
if [ -x "$f" ]; then
entrypoint_log "$0: Sourcing $f";
. "$f"
else
# warn on shell scripts without exec bit
entrypoint_log "$0: Ignoring $f, not executable";
fi
;;
*.sh)
if [ -x "$f" ]; then
entrypoint_log "$0: Launching $f";
"$f"
else
# warn on shell scripts without exec bit
entrypoint_log "$0: Ignoring $f, not executable";
fi
;;
*) entrypoint_log "$0: Ignoring $f";;
esac
done
entrypoint_log "$0: Configuration complete; ready for start up"
else
entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
fi
fi
exec "$@"

@@ -70,26 +70,9 @@ http {
 	include /etc/nginx/conf.d/*.conf;
 	include /etc/nginx/sites-enabled/*;
 	include /tmp/sites-enabled/*;
-}

-#mail {
-#	# See sample authentication script at:
-#	# http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
-#
-#	# auth_http localhost/auth.php;
-#	# pop3_capabilities "TOP" "USER";
-#	# imap_capabilities "IMAP4rev1" "UIDPLUS";
-#
-#	server {
-#		listen localhost:110;
-#		protocol pop3;
-#		proxy on;
-#	}
-#
-#	server {
-#		listen localhost:143;
-#		protocol imap;
-#		proxy on;
-#	}
-#}
+	log_format main 'XXXX $http_x_forwarded_for - $remote_user [$time_local] "$host" "$request" '
+		'$status $body_bytes_sent "$http_referer" '
+		'"$http_user_agent" $request_time';
+}

@ -1,30 +0,0 @@
server {
server_name ${PIXELFED_HOSTNAME};
client_max_body_size 128m;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
gzip on;
gzip_disable "msie6";
proxy_read_timeout 1800s;
location / {
proxy_pass http://host.docker.internal:8090;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
include /etc/nginx/includes/options-ssl-nginx.conf;
include /etc/nginx/includes/challenge.conf;
ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -1,41 +0,0 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
server_name social.${DOMAIN_NAME};
client_max_body_size 128m;
location / {
proxy_pass http://host.docker.internal:6001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto https;
}
location /api/v1/streaming {
proxy_pass http://host.docker.internal:4000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_buffering off;
proxy_redirect off;
proxy_http_version 1.1;
tcp_nodelay on;
}
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
include /etc/nginx/includes/options-ssl-nginx.conf;
include /etc/nginx/includes/challenge.conf;
ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -1,39 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }
DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top level env"
source env.production || die "no local env"
if [ -z "${DOMAIN_NAME}" ]; then
die "DOMAIN_NAME not set"
fi
certdir="../data/certbot/conf/live/${DOMAIN_NAME}"
if [ -r "$certdir/privkey.pem" ]; then
docker-compose up -d || die "nginx: unable to start"
exit 0
fi
mkdir -p "$certdir" || die "$certdir: unable to make"
openssl req \
-x509 \
-newkey rsa:2048 \
-keyout "$certdir/privkey.pem" \
-out "$certdir/fullchain.pem" \
-sha256 \
-nodes \
-days 365 \
-subj "/CN=${DOMAIN_NAME}'" \
|| die "$certdir/privkey.pem: unable to create temp key"
docker-compose up -d || die "unable to bring up nginx"
echo "SLEEPING..."
sleep 10
./certbot-renew || die "unable to create certs"

@ -0,0 +1,18 @@
version: '3'

services:
  prometheus:
    image: prom/prometheus
    container_name: prometheus
    volumes:
      - ./data/prometheus/storage:/prometheus
      - ./prometheus/prometheus.yaml:/etc/prometheus/prometheus.yml:ro

  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    container_name: cadvisor
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro

@ -0,0 +1,21 @@
global:
  scrape_interval: 15s
  external_labels:
    monitor: 'codelab-monitor'

scrape_configs:
  # nginx vts data
  - job_name: 'nginx'
    scrape_interval: 5s
    metrics_path: "/status/format/prometheus"
    static_configs:
      - targets: ['nginx:80']

  - job_name: 'metrics'
    scrape_interval: 5s
    static_configs:
      # grafana data from /metrics
      - targets: ['dashboard:3000']
      # host running the docker-compose
      - targets: ['172.17.0.1:9100']
      # cadvisor system
      - targets: ['cadvisor:8080']
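The `172.17.0.1:9100` target assumes a node exporter from the host's `apt install prometheus` packages listening on the default docker bridge address, while the other targets resolve by name on the compose network (note that the grafana job points at `dashboard:3000`, which appears to assume an alias for the `grafana` container). A sketch of verifying that the scrape endpoints answer:

```
# host node exporter, from the docker host itself
curl -s http://172.17.0.1:9100/metrics | head -3

# nginx vts and cadvisor, via the compose network (prom/prometheus is busybox-based, so use wget)
docker-compose exec prometheus wget -qO- http://nginx:80/status/format/prometheus | head -3
docker-compose exec prometheus wget -qO- http://cadvisor:8080/metrics | head -3
```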

@ -1,39 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }
which jq > /dev/null || die "jq not installed?"
which docker-compose > /dev/null || die "docker-compose not installed?"
source ./env.production || die "no production env?"
if [ -z "$DOMAIN_NAME" ]; then
die "\$DOMAIN_NAME not set; things will break"
fi
SERVICES=nginx # there is no host
SERVICES+=\ keycloak
SERVICES+=\ hedgedoc
SERVICES+=\ nextcloud
SERVICES+=\ mastodon
SERVICES+=\ grafana
SERVICES+=\ matrix
SERVICES+=\ gitea
SERVICES+=\ mobilizon
HOSTS+=\ $KEYCLOAK_HOST
HOSTS+=\ $HEDGEDOC_HOST
HOSTS+=\ $NEXTCLOUD_HOST
HOSTS+=\ $MASTODON_HOST
HOSTS+=\ $GRAFANA_HOST
HOSTS+=\ $MATRIX_HOST
HOSTS+=\ $GITEA_HOST
HOSTS+=\ $MOBILIZON_HOST
for host in $HOSTS ; do
host $host > /dev/null || die "$host: DNS entry not present?"
done
for service in $SERVICES ; do
echo "$service: starting"
./$service/setup || die "$server: failed to start"
done

@ -1,7 +0,0 @@
#!/bin/bash
for file in */docker-compose.yaml ; do
dir="$(dirname "$file")"
echo "$dir"
( cd "$dir" ; docker-compose down )
done