Docker development and run support

Andrew Morgan 2020-08-11 17:37:23 -07:00
parent af22f00868
commit ba9be7a0f9
8 changed files with 490 additions and 0 deletions

5 docker/.env Normal file

@@ -0,0 +1,5 @@
# Default environment variables used in docker-compose.yml.
# Overridden by the host's environment variables
# Where `localhost` should route to
HOST_IP_ADDRESS=127.0.0.1

101 docker/Dockerfile Normal file

@@ -0,0 +1,101 @@
# To build the image, run the `docker build` command from the root of the
# repository:
#
# docker build -f docker/Dockerfile .
#
# There is an optional PYTHON_VERSION build argument which sets the
# version of python to build against. For example:
#
# docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.8 .
#
# There is also an optional LIBOLM_VERSION build argument which sets the
# version of libolm to build against. For example:
#
# docker build -f docker/Dockerfile --build-arg LIBOLM_VERSION=3.1.4 .
#
##
## Creating a builder container
##
# We use an initial docker container to build all of the runtime dependencies,
# then transfer those dependencies to the container we're going to ship,
# before throwing this one away
ARG PYTHON_VERSION=3.8
FROM docker.io/python:${PYTHON_VERSION}-alpine3.11 as builder
##
## Build libolm for matrix-nio e2e support
##
# Install libolm build dependencies
ARG LIBOLM_VERSION=3.1.4
RUN apk add --no-cache \
make \
cmake \
gcc \
g++ \
git \
libffi-dev \
yaml-dev \
python3-dev
# Build libolm
#
# Also build the libolm python bindings and place them at /python-libs
# We will later copy contents from both of these folders to the runtime
# container
COPY docker/build_and_install_libolm.sh /scripts/
RUN /scripts/build_and_install_libolm.sh ${LIBOLM_VERSION} /python-libs
# Install Postgres dependencies
RUN apk add --no-cache \
musl-dev \
libpq \
postgresql-dev
# Install python runtime modules. We do this before copying the source code
# such that these dependencies can be cached
# This speeds up subsequent image builds when the source code is changed
RUN mkdir -p /src/my_project_name
COPY my_project_name/__init__.py /src/my_project_name/
COPY README.md my-project-name /src/
# Build the dependencies
COPY setup.py /src/setup.py
RUN pip install --prefix="/python-libs" --no-warn-script-location "/src/.[postgres]"
# Now copy the source code
COPY *.py *.md /src/
COPY my_project_name/*.py /src/my_project_name/
# And build the final module
RUN pip install --prefix="/python-libs" --no-warn-script-location "/src/.[postgres]"
##
## Creating the runtime container
##
# Create the container we'll actually ship. We need to copy libolm and any
# python dependencies that we built above to this container
FROM docker.io/python:${PYTHON_VERSION}-alpine3.11
# Copy python dependencies from the "builder" container
COPY --from=builder /python-libs /usr/local
# Copy libolm from the "builder" container
COPY --from=builder /usr/local/lib/libolm* /usr/local/lib/
# Install any native runtime dependencies
RUN apk add --no-cache \
libstdc++ \
libpq \
postgresql-dev
# Specify a volume that holds the config file, SQLite3 database,
# and the matrix-nio store
VOLUME ["/data"]
# Start the bot
ENTRYPOINT ["my-project-name", "/data/config.yaml"]

71 docker/Dockerfile.dev Normal file

@@ -0,0 +1,71 @@
# This dockerfile is crafted specifically for development purposes.
# Please use `Dockerfile` instead if you wish to deploy for production.
#
# This file differs as it does not use a builder container, nor does it
# reinstall the project's python package after copying the source code,
# saving significant time during rebuilds.
#
# To build the image, run the `docker build` command from the root of the
# repository:
#
# docker build -f docker/Dockerfile.dev .
#
# There is an optional PYTHON_VERSION build argument which sets the
# version of python to build against. For example:
#
# docker build -f docker/Dockerfile.dev --build-arg PYTHON_VERSION=3.8 .
#
# There is also an optional LIBOLM_VERSION build argument which sets the
# version of libolm to build against. For example:
#
# docker build -f docker/Dockerfile.dev --build-arg LIBOLM_VERSION=3.1.4 .
#
ARG PYTHON_VERSION=3.8
FROM docker.io/python:${PYTHON_VERSION}-alpine3.11
##
## Build libolm for matrix-nio e2e support
##
# Install libolm build dependencies
ARG LIBOLM_VERSION=3.1.4
RUN apk add --no-cache \
make \
cmake \
gcc \
g++ \
git \
libffi-dev \
yaml-dev \
python3-dev
# Build libolm
COPY docker/build_and_install_libolm.sh /scripts/
RUN /scripts/build_and_install_libolm.sh ${LIBOLM_VERSION}
# Install native runtime dependencies
RUN apk add --no-cache \
musl-dev \
libpq \
postgresql-dev \
libstdc++
# Install python runtime modules. We do this before copying the source code
# such that these dependencies can be cached
RUN mkdir -p /src/my_project_name
COPY my_project_name/__init__.py /src/my_project_name/
COPY README.md my-project-name /src/
COPY setup.py /src/setup.py
RUN pip install -e "/src/.[postgres]"
# Now copy the source code
COPY my_project_name/*.py /src/my_project_name/
COPY *.py /src/
# Specify a volume that holds the config file, SQLite3 database,
# and the matrix-nio store
VOLUME ["/data"]
# Start the app
ENTRYPOINT ["my-project-name", "/data/config.yaml"]

152 docker/README.md Normal file

@@ -0,0 +1,152 @@
# Docker
The docker image will run my-project-name with a SQLite database and
end-to-end encryption dependencies included. For larger deployments, a
connection to a Postgres database backend is recommended.
## Setup
### The `/data` volume
The docker container expects the `config.yaml` file to exist at
`/data/config.yaml`. To easily configure this, it is recommended to create a
directory on your filesystem, and mount it as `/data` inside the container:
```
mkdir data
```
We'll later mount this directory into the container so that its contents
persist across container restarts.
### Creating a config file
Copy `sample.config.yaml` to a file named `config.yaml` inside your newly
created `data` directory. Fill it out as you normally would, with a few minor
differences:
* The bot store directory should reside inside the data directory so that it
is not wiped on container restart. Change it from the default to `/data/store`.
There is no need to create this directory yourself; my-project-name will
create it on startup if it does not exist.
* Choose whether you want to use SQLite or Postgres as your database backend. If
using SQLite, ensure your database file is stored inside the `/data` directory:
```
database: "sqlite:///data/bot.db"
```
If using Postgres, point to your Postgres instance instead:
```
database: "postgres://username:password@postgres/my-project-name?sslmode=disable"
```
**Note:** a Postgres container is defined in `docker-compose.yml` for your convenience.
If you would like to use it, set your database connection string to:
```
database: "postgres://postgres:somefancypassword@postgres/postgres?sslmode=disable"
```
The password `somefancypassword` is defined in the docker compose file.
Change any other config values as necessary. For instance, you may also want to
store log files in the `/data` directory.
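For example, assuming `sample.config.yaml` sits at the root of your checkout (adjust the
path if your copy lives elsewhere), copying it into place could look like:
```
cp sample.config.yaml data/config.yaml
```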
## Running
First, create a volume for the data directory created in the above section:
```
docker volume create \
--opt type=none \
--opt o=bind \
--opt device="/path/to/data/dir" data_volume
```
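You can verify that the volume exists and is bound to the intended directory with:
```
docker volume inspect data_volume
```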
If you want to use the Postgres container defined in `docker-compose.yml`, start it
first:
```
docker-compose up -d postgres
```
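You can check that the database came up cleanly before starting the bot:
```
docker-compose logs postgres
```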
Start the bot with:
```
docker-compose up my-project-name
```
This will run the bot and log the output to the terminal. You can instead run
the container detached with the `-d` flag:
```
docker-compose up -d my-project-name
```
(Logs can later be accessed with the `docker logs` or `docker-compose logs` commands, as shown below.)
This will use the `latest` tag from
[Docker Hub](https://hub.docker.com/somebody/my-project-name).
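For example, to follow the detached container's logs:
```
docker-compose logs -f my-project-name
```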
If you would rather run from the checked out code, you can use:
```
docker-compose up local-checkout
```
This will build an optimized, production-ready container. If you are developing
instead and would like a development container for testing local changes, use
the `start-dev.sh` script and consult [CONTRIBUTING.md](../CONTRIBUTING.md).
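The script changes into the `docker` directory itself, so it can be run from anywhere in
the checkout:
```
./docker/start-dev.sh
```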
**Note:** If you are trying to connect to a Synapse instance running on the
host, you need to allow the IP address of the docker container to connect. This
is controlled by `bind_addresses` in the `listeners` section of Synapse's
config. If present, either add the docker internal IP address to the list, or
remove the option altogether to allow all addresses.
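The compose services in this directory map `localhost` inside the container to
`HOST_IP_ADDRESS` (see `docker/.env`), so if your homeserver URL points at `localhost`,
Synapse needs to be listening on whichever address that resolves to. As a quick sanity
check while the container is running, you can inspect the `extra_hosts` entry it was
given:
```
docker-compose exec my-project-name cat /etc/hosts
```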
## Updating
To update the container, navigate to the bot's `docker` directory and run:
```
docker-compose pull my-project-name
```
Then restart the bot.
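For example, bringing the service up again recreates the container from the newly pulled
image:
```
docker-compose up -d my-project-name
```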
## Systemd
A systemd service file is provided for your convenience at
[my-project-name.service](my-project-name.service). The service uses
`docker-compose` to start and stop the bot.
Copy the file to `/etc/systemd/system/my-project-name.service` and edit to
match your setup. You can then start the bot with:
```
systemctl start my-project-name
```
and stop it with:
```
systemctl stop my-project-name
```
To run the bot on system startup:
```
systemctl enable my-project-name
```
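Because the service runs `docker-compose up` in the foreground, the bot's output lands in
the systemd journal (with the default journal settings) and can be followed with:
```
journalctl -u my-project-name -f
```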
## Building the image
To build a production image from source, use the following `docker build` command
from the repo's root:
```
docker build -t somebody/my-project-name:latest -f docker/Dockerfile .
```
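A development image can be built the same way from `docker/Dockerfile.dev`; the `:dev` tag
below is only an illustrative choice:
```
docker build -t my-project-name:dev -f docker/Dockerfile.dev .
```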

32 docker/build_and_install_libolm.sh Executable file

@@ -0,0 +1,32 @@
#!/usr/bin/env sh
#
# Call with the following arguments:
#
# ./build_and_install_libolm.sh <libolm version> <python bindings install dir>
#
# Example:
#
# ./build_and_install_libolm.sh 3.1.4 /python-bindings
#
# Note that if a python bindings installation directory is not supplied, bindings will
# be installed to the default directory.
#
set -ex
# Download the specified version of libolm
git clone -b "$1" https://gitlab.matrix.org/matrix-org/olm.git olm && cd olm
# Build libolm
cmake . -Bbuild
cmake --build build
# Install
make install
# Build the python3 bindings
cd python && make olm-python3
# Install python3 bindings
mkdir -p "$2" || true
DESTDIR="$2" make install-python3

64 docker/docker-compose.yml Normal file

@@ -0,0 +1,64 @@
version: '3.1' # specify docker-compose version
volumes:
# Set up with `docker volume create ...`. See docker/README.md for more info.
data_volume:
external: true
pg_data_volume:
services:
# Runs from the latest release
my-project-name:
image: somebody/my-project-name
restart: always
volumes:
- data_volume:/data
# Used for allowing connections to homeservers hosted on the host machine
# (while docker host networking mode is still broken on Linux).
#
# Defaults to 127.0.0.1 and is set in docker/.env
extra_hosts:
- "localhost:${HOST_IP_ADDRESS}"
# Builds and runs an optimized container from local code
local-checkout:
build:
context: ..
dockerfile: docker/Dockerfile
# Build arguments may be specified here
# args:
# PYTHON_VERSION: 3.8
volumes:
- data_volume:/data
# Used for allowing connections to homeservers hosted on the host machine
# (while docker host networking mode is still broken on Linux).
#
# Defaults to 127.0.0.1 and is set in docker/.env
extra_hosts:
- "localhost:${HOST_IP_ADDRESS}"
# Builds and runs a development container from local code
local-checkout-dev:
build:
context: ..
dockerfile: docker/Dockerfile.dev
# Build arguments may be specified here
# args:
# PYTHON_VERSION: 3.8
volumes:
- data_volume:/data
# Used for allowing connections to homeservers hosted on the host machine
# (while docker host networking mode is still broken on Linux).
#
# Defaults to 127.0.0.1 and is set in docker/.env
extra_hosts:
- "localhost:${HOST_IP_ADDRESS}"
# Starts up a postgres database
postgres:
image: postgres
restart: always
volumes:
- pg_data_volume:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: somefancypassword

16 docker/my-project-name.service Normal file

@@ -0,0 +1,16 @@
[Unit]
Description=A matrix bot that does amazing things!
[Service]
Type=simple
User=my-project-name
Group=my-project-name
WorkingDirectory=/path/to/my-project-name/docker
ExecStart=/usr/bin/docker-compose up my-project-name
ExecStop=/usr/bin/docker-compose stop my-project-name
RemainAfterExit=yes
Restart=always
RestartSec=3
[Install]
WantedBy=multi-user.target

49 docker/start-dev.sh Executable file

@@ -0,0 +1,49 @@
#!/bin/bash
# A script to quickly set up a running development environment
#
# Its primary purpose is to set up docker networking correctly so that
# the bot can connect to remote services as well as those hosted on
# the host machine.
#
# Change directory to where this script is located. We'd like to run
# `docker-compose` in the same directory to use the adjacent
# docker-compose.yml and .env files
cd `dirname "$0"`
function on_exit {
cd -
}
# Ensure we change back to the old directory on script exit
trap on_exit EXIT
# To allow the docker container to connect to services running on the host,
# we need to use the host's internal ip address. Attempt to retrieve this.
#
# Check whether the ip address has been defined in the environment already
if [ -z "$HOST_IP_ADDRESS" ]; then
# It's not defined. Try to guess what it is
# First we try the `ip` command, available primarily on Linux
export HOST_IP_ADDRESS="`ip route get 1 2>/dev/null | sed -n 's/^.*src \([0-9.]*\) .*$/\1/p'`"
# Note: checking $? here would only see the (always successful) export, so
# test whether we actually got an address instead
if [ -z "$HOST_IP_ADDRESS" ]; then
# That didn't work. `ip` isn't available on old Linux systems, or MacOS.
# Try `ifconfig` instead
export HOST_IP_ADDRESS="`ifconfig $(netstat -rn | grep -E "^default|^0.0.0.0" | head -1 | awk '{print $NF}') 2>/dev/null | grep 'inet ' | awk '{print $2}' | grep -Eo '([0-9]*\.){3}[0-9]*'`"
if [ -z "$HOST_IP_ADDRESS" ]; then
# That didn't work either, give up
echo "
Unable to determine host machine's internal IP address.
Please set HOST_IP_ADDRESS environment variable manually and re-run this script.
If you do not have a need to connect to a homeserver running on the host machine,
set HOST_IP_ADDRESS=127.0.0.1"
exit 1
fi
fi
fi
# Build and run the latest local code
docker-compose up --build local-checkout-dev