# Conflicts:
#	nginx.tmpl
Tim Schneider 2021-05-11 20:22:33 +02:00
commit e30771e7fc
63 changed files with 683 additions and 386 deletions


@@ -1,9 +1,5 @@
# !!!PLEASE READ!!!
-## Questions
-If you have a question, DO NOT SUBMIT a new issue. Please ask the question on the Q&A Group: https://groups.google.com/forum/#!forum/nginx-proxy
## Bugs or Features
If you are logging a bug or feature request, please search the current open issues to see if there is already a bug or feature opened.

.github/dependabot.yml (new file)

@@ -0,0 +1,11 @@
version: 2
updates:
  # Maintain dependencies for Docker
  - package-ecosystem: "docker"
    directory: "/"
    schedule:
      interval: "daily"
    labels:
      - "area/chore"
      - "area/dockerfile"

.github/workflows/dockerhub.yml (new file)

@@ -0,0 +1,114 @@
name: DockerHub
on:
  workflow_dispatch:
  schedule:
    - cron: '0 0 * * 1'
  push:
    branches:
      - main
    tags:
      - '*.*.*'
    paths-ignore:
      - 'test/*'
      - '.gitignore'
      - '.travis.yml'
      - 'docker-compose-separate-containers.yml'
      - 'docker-compose.yml'
      - 'LICENSE'
      - 'Makefile'
      - '*.md'

jobs:
  multiarch-build-debian:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Get Docker tags for Debian based image
        id: docker_meta_debian
        uses: crazy-max/ghaction-docker-meta@v2
        with:
          images: |
            nginxproxy/nginx-proxy
            jwilder/nginx-proxy
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=latest,enable=${{ endsWith(github.ref, github.event.repository.default_branch) }}

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push the Debian based image
        id: docker_build_debian
        uses: docker/build-push-action@v2
        with:
          file: Dockerfile
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          push: true
          tags: ${{ steps.docker_meta_debian.outputs.tags }}
          labels: ${{ steps.docker_meta_debian.outputs.labels }}

      - name: Images digests
        run: echo ${{ steps.docker_build_debian.outputs.digest }}

  multiarch-build-alpine:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Get Docker tags for Alpine based image
        id: docker_meta_alpine
        uses: crazy-max/ghaction-docker-meta@v2
        with:
          images: |
            nginxproxy/nginx-proxy
            jwilder/nginx-proxy
          tags: |
            type=semver,suffix=-alpine,pattern={{version}}
            type=semver,suffix=-alpine,pattern={{major}}.{{minor}}
            type=raw,value=alpine,enable=${{ endsWith(github.ref, github.event.repository.default_branch) }}
          flavor: latest=false

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push the Alpine based image
        id: docker_build_alpine
        uses: docker/build-push-action@v2
        with:
          file: Dockerfile.alpine
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          push: true
          tags: ${{ steps.docker_meta_alpine.outputs.tags }}
          labels: ${{ steps.docker_meta_alpine.outputs.labels }}

      - name: Images digests
        run: echo ${{ steps.docker_build_alpine.outputs.digest }}

.github/workflows/test.yml (new file)

@@ -0,0 +1,45 @@
name: Tests
on:
  push:
    paths-ignore:
      - 'LICENSE'
      - '**.md'
  pull_request:
    paths-ignore:
      - 'LICENSE'
      - '**.md'

jobs:
  unit:
    name: Unit Tests
    runs-on: ubuntu-latest
    strategy:
      fail-fast: true
      matrix:
        base_docker_image: [alpine, debian]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.9
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r python-requirements.txt
        working-directory: test/requirements
      - name: Build Docker web server image
        run: make build-webserver
      - name: Build Docker nginx proxy test image
        run: make build-nginx-proxy-test-${{ matrix.base_docker_image }}
      - name: Run tests
        run: pytest
        working-directory: test
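For contributors who want to reproduce this workflow outside of CI, a rough local equivalent is sketched below, assuming the Makefile targets introduced in this commit and the `test/requirements/python-requirements.txt` file referenced above (paths and the choice of the Debian flavor are illustrative):

```shell
# Install the Python test dependencies the workflow installs
python3 -m pip install --upgrade pip
python3 -m pip install -r test/requirements/python-requirements.txt

# Build the test web server image and one flavor of the proxy test image
make build-webserver
make build-nginx-proxy-test-debian   # or: make build-nginx-proxy-test-alpine

# Run the pytest suite from the test directory
cd test && pytest
```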


@@ -1,22 +0,0 @@
dist: trusty
sudo: required

env:
  matrix:
    - TEST_TARGET: test-debian
    - TEST_TARGET: test-alpine

before_install:
  - sudo apt-get -y remove docker docker-engine docker-ce
  - sudo rm /etc/apt/sources.list.d/docker.list
  - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
  - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
  - sudo apt-get update
  - sudo apt-get -y install docker-ce
  - docker version
  - docker info
  # prepare docker test requirements
  - make update-dependencies

script:
  - make $TEST_TARGET


@@ -1,5 +1,46 @@
-FROM nginx:1.13
-LABEL maintainer="Jason Wilder mail@jasonwilder.com"
+# setup build arguments for version of dependencies to use
+ARG DOCKER_GEN_VERSION=0.7.6
ARG FOREGO_VERSION=0.16.1
# Use a specific version of golang to build both binaries
FROM golang:1.15.10 as gobuilder
# Build docker-gen from scratch
FROM gobuilder as dockergen
ARG DOCKER_GEN_VERSION
RUN git clone https://github.com/jwilder/docker-gen \
&& cd /go/docker-gen \
&& git -c advice.detachedHead=false checkout $DOCKER_GEN_VERSION \
&& go mod download \
&& CGO_ENABLED=0 GOOS=linux go build -ldflags "-X main.buildVersion=${DOCKER_GEN_VERSION}" ./cmd/docker-gen \
&& go clean -cache \
&& mv docker-gen /usr/local/bin/ \
&& cd - \
&& rm -rf /go/docker-gen
# Build forego from scratch
# Because this relies on golang workspaces, we need to use go < 1.8.
FROM gobuilder as forego
# Download the sources for the given version
ARG FOREGO_VERSION
ADD https://github.com/jwilder/forego/archive/v${FOREGO_VERSION}.tar.gz sources.tar.gz
# Move the sources into the right directory
RUN tar -xzf sources.tar.gz && \
mkdir -p /go/src/github.com/ddollar/ && \
mv forego-* /go/src/github.com/ddollar/forego
# Install the dependencies and make the forego executable
WORKDIR /go/src/github.com/ddollar/forego/
RUN go get -v ./... && \
CGO_ENABLED=0 GOOS=linux go build -o forego .
# Build the final image
FROM nginx:1.19.10
LABEL maintainer="Nicolas Duchon <nicolas.duchon@gmail.com> (@buchdag)"
# Install wget and install/updates certificates
RUN apt-get update \

@@ -12,17 +53,17 @@ RUN apt-get update \

# Configure Nginx and apply fix for very long server names
RUN echo "daemon off;" >> /etc/nginx/nginx.conf \
- && sed -i 's/worker_processes 1/worker_processes auto/' /etc/nginx/nginx.conf
+ && sed -i 's/worker_processes 1/worker_processes auto/' /etc/nginx/nginx.conf \
+ && sed -i 's/worker_connections 1024/worker_connections 10240/' /etc/nginx/nginx.conf

-# Install Forego
-ADD https://github.com/jwilder/forego/releases/download/v0.16.1/forego /usr/local/bin/forego
-RUN chmod u+x /usr/local/bin/forego
+# Install Forego + docker-gen
+COPY --from=forego /go/src/github.com/ddollar/forego/forego /usr/local/bin/forego
+COPY --from=dockergen /usr/local/bin/docker-gen /usr/local/bin/docker-gen

-ENV DOCKER_GEN_VERSION 0.7.4
-
-RUN wget https://github.com/jwilder/docker-gen/releases/download/$DOCKER_GEN_VERSION/docker-gen-linux-amd64-$DOCKER_GEN_VERSION.tar.gz \
- && tar -C /usr/local/bin -xvzf docker-gen-linux-amd64-$DOCKER_GEN_VERSION.tar.gz \
- && rm /docker-gen-linux-amd64-$DOCKER_GEN_VERSION.tar.gz
+# Add DOCKER_GEN_VERSION environment variable
+# Because some external projects rely on it
+ARG DOCKER_GEN_VERSION
+ENV DOCKER_GEN_VERSION=${DOCKER_GEN_VERSION}

COPY network_internal.conf /etc/nginx/


@@ -1,5 +1,46 @@
-FROM nginx:1.13-alpine
-LABEL maintainer="Jason Wilder mail@jasonwilder.com"
+# setup build arguments for version of dependencies to use
+ARG DOCKER_GEN_VERSION=0.7.6
ARG FOREGO_VERSION=0.16.1
# Use a specific version of golang to build both binaries
FROM golang:1.15.10-alpine as gobuilder
RUN apk add --no-cache git
# Build docker-gen from scratch
FROM gobuilder as dockergen
ARG DOCKER_GEN_VERSION
RUN git clone https://github.com/jwilder/docker-gen \
&& cd /go/docker-gen \
&& git -c advice.detachedHead=false checkout $DOCKER_GEN_VERSION \
&& go mod download \
&& CGO_ENABLED=0 go build -ldflags "-X main.buildVersion=${DOCKER_GEN_VERSION}" ./cmd/docker-gen \
&& go clean -cache \
&& mv docker-gen /usr/local/bin/ \
&& cd - \
&& rm -rf /go/docker-gen
# Build forego from scratch
FROM gobuilder as forego
# Download the sources for the given version
ARG FOREGO_VERSION
ADD https://github.com/jwilder/forego/archive/v${FOREGO_VERSION}.tar.gz sources.tar.gz
# Move the sources into the right directory
RUN tar -xzf sources.tar.gz && \
mkdir -p /go/src/github.com/ddollar/ && \
mv forego-* /go/src/github.com/ddollar/forego
# Install the dependencies and make the forego executable
WORKDIR /go/src/github.com/ddollar/forego/
RUN go get -v ./... && \
CGO_ENABLED=0 GOOS=linux go build -o forego .
# Build the final image
FROM nginx:1.19.10-alpine
LABEL maintainer="Nicolas Duchon <nicolas.duchon@gmail.com> (@buchdag)"
# Install wget and install/updates certificates
RUN apk add --no-cache --virtual .run-deps \

@@ -9,17 +50,17 @@ RUN apk add --no-cache --virtual .run-deps \

# Configure Nginx and apply fix for very long server names
RUN echo "daemon off;" >> /etc/nginx/nginx.conf \
- && sed -i 's/worker_processes 1/worker_processes auto/' /etc/nginx/nginx.conf
+ && sed -i 's/worker_processes 1/worker_processes auto/' /etc/nginx/nginx.conf \
+ && sed -i 's/worker_connections 1024/worker_connections 10240/' /etc/nginx/nginx.conf

-# Install Forego
-ADD https://github.com/jwilder/forego/releases/download/v0.16.1/forego /usr/local/bin/forego
-RUN chmod u+x /usr/local/bin/forego
+# Install Forego + docker-gen
+COPY --from=forego /go/src/github.com/ddollar/forego/forego /usr/local/bin/forego
+COPY --from=dockergen /usr/local/bin/docker-gen /usr/local/bin/docker-gen

-ENV DOCKER_GEN_VERSION 0.7.4
-
-RUN wget --quiet https://github.com/jwilder/docker-gen/releases/download/$DOCKER_GEN_VERSION/docker-gen-alpine-linux-amd64-$DOCKER_GEN_VERSION.tar.gz \
- && tar -C /usr/local/bin -xvzf docker-gen-alpine-linux-amd64-$DOCKER_GEN_VERSION.tar.gz \
- && rm /docker-gen-alpine-linux-amd64-$DOCKER_GEN_VERSION.tar.gz
+# Add DOCKER_GEN_VERSION environment variable
+# Because some external projects rely on it
+ARG DOCKER_GEN_VERSION
+ENV DOCKER_GEN_VERSION=${DOCKER_GEN_VERSION}

COPY network_internal.conf /etc/nginx/


@@ -2,15 +2,19 @@
.PHONY : test-debian test-alpine test

-update-dependencies:
-	test/requirements/build.sh
+build-webserver:
+	docker build -t web test/requirements/web

-test-debian: update-dependencies
-	docker build -t jwilder/nginx-proxy:test .
+build-nginx-proxy-test-debian:
+	docker build -t nginxproxy/nginx-proxy:test .
+
+build-nginx-proxy-test-alpine:
+	docker build -f Dockerfile.alpine -t nginxproxy/nginx-proxy:test .
+
+test-debian: build-webserver build-nginx-proxy-test-debian
	test/pytest.sh

-test-alpine: update-dependencies
-	docker build -f Dockerfile.alpine -t jwilder/nginx-proxy:test .
+test-alpine: build-webserver build-nginx-proxy-test-alpine
	test/pytest.sh

test: test-debian test-alpine
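With these targets in place, a typical local invocation could look like the following sketch (the `test` targets already depend on the build targets above):

```shell
make test-debian   # builds the web test image and nginxproxy/nginx-proxy:test (Debian), then runs pytest
make test-alpine   # same for the Alpine flavor
make test          # runs both flavors back to back
```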

README.md

@@ -1,5 +1,9 @@
-![latest 0.7.0](https://img.shields.io/badge/latest-0.7.0-green.svg?style=flat)
-![nginx 1.13](https://img.shields.io/badge/nginx-1.13-brightgreen.svg) ![License MIT](https://img.shields.io/badge/license-MIT-blue.svg) [![Build Status](https://travis-ci.org/jwilder/nginx-proxy.svg?branch=master)](https://travis-ci.org/jwilder/nginx-proxy) [![](https://img.shields.io/docker/stars/jwilder/nginx-proxy.svg)](https://hub.docker.com/r/jwilder/nginx-proxy 'DockerHub') [![](https://img.shields.io/docker/pulls/jwilder/nginx-proxy.svg)](https://hub.docker.com/r/jwilder/nginx-proxy 'DockerHub')
+[![Test](https://github.com/nginx-proxy/nginx-proxy/actions/workflows/test.yml/badge.svg)](https://github.com/nginx-proxy/nginx-proxy/actions/workflows/test.yml)
+[![GitHub release](https://img.shields.io/github/v/release/nginx-proxy/nginx-proxy)](https://github.com/nginx-proxy/nginx-proxy/releases)
+![nginx 1.19.10](https://img.shields.io/badge/nginx-1.19.10-brightgreen.svg)
+[![Docker Image Size](https://img.shields.io/docker/image-size/nginxproxy/nginx-proxy?sort=semver)](https://hub.docker.com/r/nginxproxy/nginx-proxy "Click to view the image on Docker Hub")
+[![Docker stars](https://img.shields.io/docker/stars/nginxproxy/nginx-proxy.svg)](https://hub.docker.com/r/nginxproxy/nginx-proxy 'DockerHub')
+[![Docker pulls](https://img.shields.io/docker/pulls/nginxproxy/nginx-proxy.svg)](https://hub.docker.com/r/nginxproxy/nginx-proxy 'DockerHub')

nginx-proxy sets up a container running nginx and [docker-gen][1]. docker-gen generates reverse proxy configs for nginx and reloads nginx when containers are started and stopped.
@@ -10,13 +14,13 @@ See [Automated Nginx Reverse Proxy for Docker][2] for why you might want to use

To run it:

-$ docker run -d -p 80:80 -v /var/run/docker.sock:/tmp/docker.sock:ro jwilder/nginx-proxy
+$ docker run -d -p 80:80 -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy

Then start any containers you want proxied with an env var `VIRTUAL_HOST=subdomain.youdomain.com`

$ docker run -e VIRTUAL_HOST=foo.bar.com ...

-The containers being proxied must [expose](https://docs.docker.com/engine/reference/run/#expose-incoming-ports) the port to be proxied, either by using the `EXPOSE` directive in their `Dockerfile` or by using the `--expose` flag to `docker run` or `docker create`.
+The containers being proxied must [expose](https://docs.docker.com/engine/reference/run/#expose-incoming-ports) the port to be proxied, either by using the `EXPOSE` directive in their `Dockerfile` or by using the `--expose` flag to `docker run` or `docker create` and be in the same network. By default, if you don't pass the --net flag when your nginx-proxy container is created, it will only be attached to the default bridge network. This means that it will not be able to connect to containers on networks other than bridge.
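To illustrate that requirement, a minimal sketch using a user-defined network (the network name and backend image are placeholders):

```shell
docker network create my-network
docker run -d -p 80:80 --net my-network -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy
docker run -d --net my-network --expose 8000 -e VIRTUAL_HOST=foo.bar.com -e VIRTUAL_PORT=8000 my-backend-image
```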
Provided your DNS is setup to forward foo.bar.com to the host running nginx-proxy, the request will be routed to a container with the VIRTUAL_HOST env var set.

@@ -24,17 +28,17 @@ Provided your DNS is setup to forward foo.bar.com to the host running nginx-prox

The nginx-proxy images are available in two flavors.

-#### jwilder/nginx-proxy:latest
+#### nginxproxy/nginx-proxy:latest

-This image uses the debian:jessie based nginx image.
+This image uses the debian:buster based nginx image.

-$ docker pull jwilder/nginx-proxy:latest
+$ docker pull nginxproxy/nginx-proxy:latest

-#### jwilder/nginx-proxy:alpine
+#### nginxproxy/nginx-proxy:alpine

-This image is based on the nginx:alpine image. Use this image to fully support HTTP/2 (including ALPN required by recent Chrome versions). A valid certificate is required as well (see eg. below "SSL Support using letsencrypt" for more info).
+This image is based on the nginx:alpine image. Use this image to fully support HTTP/2 (including ALPN required by recent Chrome versions). A valid certificate is required as well (see eg. below "SSL Support using an ACME CA" for more info).

-$ docker pull jwilder/nginx-proxy:alpine
+$ docker pull nginxproxy/nginx-proxy:alpine

### Docker Compose
@@ -43,7 +47,7 @@ version: '2'
services:
  nginx-proxy:
-    image: jwilder/nginx-proxy
+    image: nginxproxy/nginx-proxy
    ports:
      - "80:80"
    volumes:

@@ -51,8 +55,11 @@ services:
  whoami:
    image: jwilder/whoami
+    expose:
+      - "8000"
    environment:
      - VIRTUAL_HOST=whoami.local
+      - VIRTUAL_PORT=8000
```

```shell

@@ -65,7 +72,15 @@ I'm 5b129ab83266

You can activate the IPv6 support for the nginx-proxy container by passing the value `true` to the `ENABLE_IPV6` environment variable:

-$ docker run -d -p 80:80 -e ENABLE_IPV6=true -v /var/run/docker.sock:/tmp/docker.sock:ro jwilder/nginx-proxy
+$ docker run -d -p 80:80 -e ENABLE_IPV6=true -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy

+#### Scoped IPv6 Resolvers
+
+NginX does not support scoped IPv6 resolvers. In [docker-entrypoint.sh](./docker-entrypoint.sh) the resolvers are parsed from resolv.conf, but any scoped IPv6 addreses will be removed.
+
+#### IPv6 NAT
+
+By default, docker uses IPv6-to-IPv4 NAT. This means all client connections from IPv6 addresses will show docker's internal IPv4 host address. To see true IPv6 client IP addresses, you must [enable IPv6](https://docs.docker.com/config/daemon/ipv6/) and use [ipv6nat](https://github.com/robbertkl/docker-ipv6nat). You must also disable the userland proxy by adding `"userland-proxy": false` to `/etc/docker/daemon.json` and restarting the daemon.
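For reference, a minimal sketch of the daemon configuration and ipv6nat container described above (the IPv6 subnet is a placeholder; adjust it to your environment):

```shell
# /etc/docker/daemon.json (restart the Docker daemon after editing)
# {
#   "ipv6": true,
#   "fixed-cidr-v6": "2001:db8:1::/64",
#   "userland-proxy": false
# }

# Run the ipv6nat helper as documented in its README
docker run -d --name ipv6nat --privileged --network host --restart unless-stopped \
  -v /var/run/docker.sock:/var/run/docker.sock:ro robbertkl/ipv6nat
```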
### Multiple Ports

@@ -90,7 +105,7 @@ If you want your `nginx-proxy` container to be attached to a different network,

```console
$ docker run -d -p 80:80 -v /var/run/docker.sock:/tmp/docker.sock:ro \
---name my-nginx-proxy --net my-network jwilder/nginx-proxy
+--name my-nginx-proxy --net my-network nginxproxy/nginx-proxy
$ docker network connect my-other-network my-nginx-proxy
```

@@ -111,7 +126,7 @@ allow 172.16.0.0/12;
deny all;
```

-When internal-only access is enabled, external clients with be denied with an `HTTP 403 Forbidden`
+When internal-only access is enabled, external clients will be denied with an `HTTP 403 Forbidden`

> If there is a load-balancer / reverse proxy in front of `nginx-proxy` that hides the client IP (example: AWS Application/Elastic Load Balancer), you will need to use the nginx `realip` module (already installed) to extract the client's IP from the HTTP request headers. Please see the [nginx realip module configuration](http://nginx.org/en/docs/http/ngx_http_realip_module.html) for more details. This configuration can be added to a new config file and mounted in `/etc/nginx/conf.d/`.

@@ -133,7 +148,7 @@ If you would like to connect to FastCGI backend, set `VIRTUAL_PROTO=fastcgi` on
backend container. Your backend container should then listen on a port rather
than a socket and expose that port.

-### FastCGI Filr Root Directory
+### FastCGI File Root Directory

If you use fastcgi,you can set `VIRTUAL_ROOT=xxx` for your root directory
@@ -151,12 +166,15 @@ into the nginx-proxy container and be served with this "backend".

To set the default host for nginx use the env var `DEFAULT_HOST=foo.bar.com` for example

-$ docker run -d -p 80:80 -e DEFAULT_HOST=foo.bar.com -v /var/run/docker.sock:/tmp/docker.sock:ro jwilder/nginx-proxy
+$ docker run -d -p 80:80 -e DEFAULT_HOST=foo.bar.com -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy
+
+nginx-proxy will then redirect all requests to a container where `VIRTUAL_HOST` is set to `DEFAULT_HOST`, if they don't match any (other) `VIRTUAL_HOST`. Using the example above requests without matching `VIRTUAL_HOST` will be redirected to a plain nginx instance after running the following command:
+
+$ docker run -d -e VIRTUAL_HOST=foo.bar.com nginx

### Separate Containers

-nginx-proxy can also be run as two separate containers using the [jwilder/docker-gen](https://index.docker.io/u/jwilder/docker-gen/)
+nginx-proxy can also be run as two separate containers using the [jwilder/docker-gen](https://hub.docker.com/r/jwilder/docker-gen)
image and the official [nginx](https://registry.hub.docker.com/_/nginx/) image.

You may want to do this to prevent having the docker socket bound to a publicly exposed container service.

@@ -169,7 +187,7 @@ $ curl -H "Host: whoami.local" localhost
I'm 5b129ab83266
```

-To run nginx proxy as a separate container you'll need to have [nginx.tmpl](https://github.com/jwilder/nginx-proxy/blob/master/nginx.tmpl) on your host system.
+To run nginx proxy as a separate container you'll need to have [nginx.tmpl](https://github.com/nginx-proxy/nginx-proxy/blob/main/nginx.tmpl) on your host system.

First start nginx with a volume:

@@ -188,10 +206,14 @@ $ docker run --volumes-from nginx \

Finally, start your containers with `VIRTUAL_HOST` environment variables.

$ docker run -e VIRTUAL_HOST=foo.bar.com ...

-### SSL Support using letsencrypt
+### SSL Support using an ACME CA

-[letsencrypt-nginx-proxy-companion](https://github.com/JrCs/docker-letsencrypt-nginx-proxy-companion) is a lightweight companion container for the nginx-proxy. It allow the creation/renewal of Let's Encrypt certificates automatically.
+[acme-companion](https://github.com/nginx-proxy/acme-companion) is a lightweight companion container for the nginx-proxy. It allows the automated creation/renewal of SSL certificates using the ACME protocol.

+Set `DHPARAM_GENERATION` environment variable to `false` to disabled Diffie-Hellman parameters completely. This will also ignore auto-generation made by `nginx-proxy`.
+The default value is `true`
+
+$ docker run -e DHPARAM_GENERATION=false ....
### SSL Support

SSL is supported using single host, wildcard and SNI certificates using naming conventions for

@@ -199,7 +221,7 @@ certificates or optionally specifying a cert name (for SNI) as an environment va

To enable SSL:

-$ docker run -d -p 80:80 -p 443:443 -v /path/to/certs:/etc/nginx/certs -v /var/run/docker.sock:/tmp/docker.sock:ro jwilder/nginx-proxy
+$ docker run -d -p 80:80 -p 443:443 -v /path/to/certs:/etc/nginx/certs -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy

The contents of `/path/to/certs` should contain the certificates and private keys for any virtual
hosts in use. The certificate and keys should be named after the virtual host with a `.crt` and

@@ -223,15 +245,15 @@ at startup. Since it can take minutes to generate a new `dhparam.pem`, it is do
background. Once generation is complete, the `dhparam.pem` is saved on a persistent volume and nginx
is reloaded. This generation process only occurs the first time you start `nginx-proxy`.

-> COMPATIBILITY WARNING: The default generated `dhparam.pem` key is 2048 bits for A+ security. Some
+> COMPATIBILITY WARNING: The default generated `dhparam.pem` key is 4096 bits for A+ security. Some
> older clients (like Java 6 and 7) do not support DH keys with over 1024 bits. In order to support these
> clients, you must either provide your own `dhparam.pem`, or tell `nginx-proxy` to generate a 1024-bit
> key on startup by passing `-e DHPARAM_BITS=1024`.

In the separate container setup, no pregenerated key will be available and neither the
-[jwilder/docker-gen](https://index.docker.io/u/jwilder/docker-gen/) image nor the offical
+[jwilder/docker-gen](https://hub.docker.com/r/jwilder/docker-gen) image nor the offical
[nginx](https://registry.hub.docker.com/_/nginx/) image will generate one. If you still want A+ security
-in a separate container setup, you'll have to generate a 2048 bits DH key file manually and mount it on the
+in a separate container setup, you'll have to generate a 2048 or 4096 bits DH key file manually and mount it on the
nginx container, at `/etc/nginx/dhparam/dhparam.pem`.
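For example, such a key can be generated on the host with openssl and mounted into the nginx container at the expected path (a sketch; generating 4096 bits can take a long time):

```shell
openssl dhparam -out /path/to/dhparam.pem 4096
docker run -d --name nginx -p 80:80 -p 443:443 \
  -v /path/to/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro \
  nginx
```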
#### Wildcard Certificates

@@ -257,25 +279,23 @@ and OCSP Stapling is enabled.

#### How SSL Support Works

-The default SSL cipher configuration is based on the [Mozilla intermediate profile](https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29) which
-should provide compatibility with clients back to Firefox 1, Chrome 1, IE 7, Opera 5, Safari 1,
-Windows XP IE8, Android 2.3, Java 7. Note that the DES-based TLS ciphers were removed for security.
-The configuration also enables HSTS, PFS, OCSP stapling and SSL session caches. Currently TLS 1.0, 1.1 and 1.2
-are supported. TLS 1.0 is deprecated but its end of life is not until June 30, 2018. It is being
-included because the following browsers will stop working when it is removed: Chrome < 22, Firefox < 27,
-IE < 11, Safari < 7, iOS < 5, Android Browser < 5.
+The default SSL cipher configuration is based on the [Mozilla intermediate profile](https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29) version 5.0 which
+should provide compatibility with clients back to Firefox 27, Android 4.4.2, Chrome 31, Edge, IE 11 on Windows 7,
+Java 8u31, OpenSSL 1.0.1, Opera 20, and Safari 9. Note that the DES-based TLS ciphers were removed for security.
+The configuration also enables HSTS, PFS, OCSP stapling and SSL session caches. Currently TLS 1.2 and 1.3
+are supported.

If you don't require backward compatibility, you can use the [Mozilla modern profile](https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility)
-profile instead by including the environment variable `SSL_POLICY=Mozilla-Modern` to your container.
-This profile is compatible with clients back to Firefox 27, Chrome 30, IE 11 on Windows 7,
-Edge, Opera 17, Safari 9, Android 5.0, and Java 8.
+profile instead by including the environment variable `SSL_POLICY=Mozilla-Modern` to the nginx-proxy container or to your container.
+This profile is compatible with clients back to Firefox 63, Android 10.0, Chrome 70, Edge 75, Java 11,
+OpenSSL 1.1.1, Opera 57, and Safari 12.1. Note that this profile is **not** compatible with any version of Internet Explorer.

Other policies available through the `SSL_POLICY` environment variable are [`Mozilla-Old`](https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility)
and the [AWS ELB Security Policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html)
`AWS-TLS-1-2-2017-01`, `AWS-TLS-1-1-2017-01`, `AWS-2016-08`, `AWS-2015-05`, `AWS-2015-03` and `AWS-2015-02`.

Note that the `Mozilla-Old` policy should use a 1024 bits DH key for compatibility but this container generates
-a 2048 bits key. The [Diffie-Hellman Groups](#diffie-hellman-groups) section details different methods of bypassing
+a 4096 bits key. The [Diffie-Hellman Groups](#diffie-hellman-groups) section details different methods of bypassing
this, either globally or per virtual-host.

The default behavior for the proxy when port 80 and 443 are exposed is as follows:
@@ -291,12 +311,12 @@ a 500.

To serve traffic in both SSL and non-SSL modes without redirecting to SSL, you can include the
environment variable `HTTPS_METHOD=noredirect` (the default is `HTTPS_METHOD=redirect`). You can also
disable the non-SSL site entirely with `HTTPS_METHOD=nohttp`, or disable the HTTPS site with
-`HTTPS_METHOD=nohttps`. `HTTPS_METHOD` must be specified on each container for which you want to
-override the default behavior. If `HTTPS_METHOD=noredirect` is used, Strict Transport Security (HSTS)
+`HTTPS_METHOD=nohttps`. `HTTPS_METHOD` can be specified on each container for which you want to
+override the default behavior or on the proxy container to set it globally. If `HTTPS_METHOD=noredirect` is used, Strict Transport Security (HSTS)
is disabled to prevent HTTPS users from being redirected by the client. If you cannot get to the HTTP
site after changing this setting, your browser has probably cached the HSTS policy and is automatically
redirecting you back to HTTPS. You will need to clear your browser's HSTS cache or use an incognito
window / different browser.
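A brief sketch of the two scopes described above (the backend image name is a placeholder):

```shell
# Globally, on the proxy container itself
docker run -d -p 80:80 -p 443:443 -e HTTPS_METHOD=noredirect \
  -v /path/to/certs:/etc/nginx/certs \
  -v /var/run/docker.sock:/tmp/docker.sock:ro \
  nginxproxy/nginx-proxy

# Or per proxied container, overriding the default for a single virtual host
docker run -d -e VIRTUAL_HOST=foo.bar.com -e HTTPS_METHOD=nohttp my-backend-image
```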
By default, [HTTP Strict Transport Security (HSTS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security)

@@ -316,7 +336,7 @@ $ docker run -d -p 80:80 -p 443:443 \
-v /path/to/htpasswd:/etc/nginx/htpasswd \
-v /path/to/certs:/etc/nginx/certs \
-v /var/run/docker.sock:/tmp/docker.sock:ro \
-jwilder/nginx-proxy
+nginxproxy/nginx-proxy
```

You'll need apache2-utils on the machine where you plan to create the htpasswd file. Follow these [instructions](http://httpd.apache.org/docs/2.2/programs/htpasswd.html)
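For instance, on a Debian/Ubuntu host the password file for a virtual host could be created like this (a sketch; the file must be named after the virtual host):

```shell
sudo apt-get install apache2-utils
htpasswd -c /path/to/htpasswd/foo.bar.com username
```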
@@ -358,7 +378,7 @@ To add settings on a proxy-wide basis, add your configuration file under /etc/n

This can be done in a derived image by creating the file in a `RUN` command or by `COPY`ing the file into `conf.d`:

```Dockerfile
-FROM jwilder/nginx-proxy
+FROM nginxproxy/nginx-proxy
RUN { \
echo 'server_tokens off;'; \
echo 'client_max_body_size 100m;'; \

@@ -367,7 +387,7 @@ RUN { \

Or it can be done by mounting in your custom configuration in your `docker run` command:

-$ docker run -d -p 80:80 -p 443:443 -v /path/to/my_proxy.conf:/etc/nginx/conf.d/my_proxy.conf:ro -v /var/run/docker.sock:/tmp/docker.sock:ro jwilder/nginx-proxy
+$ docker run -d -p 80:80 -p 443:443 -v /path/to/my_proxy.conf:/etc/nginx/conf.d/my_proxy.conf:ro -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy

#### Per-VIRTUAL_HOST

@@ -377,7 +397,7 @@ In order to allow virtual hosts to be dynamically configured as backends are add

For example, if you have a virtual host named `app.example.com`, you could provide a custom configuration for that host as follows:

-$ docker run -d -p 80:80 -p 443:443 -v /path/to/vhost.d:/etc/nginx/vhost.d:ro -v /var/run/docker.sock:/tmp/docker.sock:ro jwilder/nginx-proxy
+$ docker run -d -p 80:80 -p 443:443 -v /path/to/vhost.d:/etc/nginx/vhost.d:ro -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy

$ { echo 'server_tokens off;'; echo 'client_max_body_size 100m;'; } > /path/to/vhost.d/app.example.com

If you are using multiple hostnames for a single container (e.g. `VIRTUAL_HOST=example.com,www.example.com`), the virtual host configuration file must exist for each hostname. If you would like to use the same configuration for multiple virtual host names, you can use a symlink:
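For example (a sketch reusing the vhost.d directory from above):

```shell
{ echo 'server_tokens off;'; echo 'client_max_body_size 100m;'; } > /path/to/vhost.d/example.com
ln -s /path/to/vhost.d/example.com /path/to/vhost.d/www.example.com
```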
@@ -397,7 +417,7 @@ just like the previous section except with the suffix `_location`.

For example, if you have a virtual host named `app.example.com` and you have configured a proxy_cache `my-cache` in another custom file, you could tell it to use a proxy cache as follows:

-$ docker run -d -p 80:80 -p 443:443 -v /path/to/vhost.d:/etc/nginx/vhost.d:ro -v /var/run/docker.sock:/tmp/docker.sock:ro jwilder/nginx-proxy
+$ docker run -d -p 80:80 -p 443:443 -v /path/to/vhost.d:/etc/nginx/vhost.d:ro -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy

$ { echo 'proxy_cache my-cache;'; echo 'proxy_cache_valid 200 302 60m;'; echo 'proxy_cache_valid 404 1m;' } > /path/to/vhost.d/app.example.com_location

If you are using multiple hostnames for a single container (e.g. `VIRTUAL_HOST=example.com,www.example.com`), the virtual host configuration file must exist for each hostname. If you would like to use the same configuration for multiple virtual host names, you can use a symlink:
@@ -416,26 +436,15 @@ Before submitting pull requests or issues, please check github to make sure an e

#### Running Tests Locally

-To run tests, you need to prepare the docker image to test which must be tagged jwilder/nginx-proxy:test:
-
-docker build -t jwilder/nginx-proxy:test . # build the Debian variant image
-
-and call the [test/pytest.sh](test/pytest.sh) script.
-
-Then build the Alpine variant of the image:
-
-docker build -f Dockerfile.alpine -t jwilder/nginx-proxy:test . # build the Alpline variant image
-
-and call the [test/pytest.sh](test/pytest.sh) script again.
-
-If your system has the `make` command, you can automate those tasks by calling:
+To run tests, you just need to run the command below:

make test

+This commands run tests on two variants of the nginx-proxy docker image: Debian and Alpine.
+
+You can run the tests for each of these images with their respective commands:
+
+make test-debian
+make test-alpine

You can learn more about how the test suite works and how to write new tests in the [test/README.md](test/README.md) file.

-### Need help?
-
-If you have questions on how to use the image, please ask them on the [Q&A Group](https://groups.google.com/forum/#!forum/nginx-proxy)


@@ -1,7 +1,7 @@
version: '2'
services:
  nginx-proxy:
-    image: jwilder/nginx-proxy
+    image: nginxproxy/nginx-proxy
    container_name: nginx-proxy
    ports:
      - "80:80"


@@ -4,10 +4,10 @@ set -e

# Warn if the DOCKER_HOST socket does not exist
if [[ $DOCKER_HOST = unix://* ]]; then
    socket_file=${DOCKER_HOST#unix://}
-    if ! [ -S $socket_file ]; then
+    if ! [ -S "$socket_file" ]; then
        cat >&2 <<-EOT
            ERROR: you need to share your Docker host socket with a volume at $socket_file
-            Typically you should run your jwilder/nginx-proxy with: \`-v /var/run/docker.sock:$socket_file:ro\`
+            Typically you should run your nginxproxy/nginx-proxy with: \`-v /var/run/docker.sock:$socket_file:ro\`
            See the documentation at http://git.io/vZaGJ
        EOT
        socketMissing=1

@@ -21,18 +21,24 @@ else
fi

# Generate dhparam file if required
-# Note: if $DHPARAM_BITS is not defined, generate-dhparam.sh will use 2048 as a default
-/app/generate-dhparam.sh $DHPARAM_BITS
+/app/generate-dhparam.sh

# Compute the DNS resolvers for use in the templates - if the IP contains ":", it's IPv6 and must be enclosed in []
-export RESOLVERS=$(awk '$1 == "nameserver" {print ($2 ~ ":")? "["$2"]": $2}' ORS=' ' /etc/resolv.conf | sed 's/ *$//g')
-if [ "x$RESOLVERS" = "x" ]; then
-    echo "Warning: unable to determine DNS resolvers for nginx" >&2
-    unset RESOLVERS
+RESOLVERS=$(awk '$1 == "nameserver" {print ($2 ~ ":")? "["$2"]": $2}' ORS=' ' /etc/resolv.conf | sed 's/ *$//g'); export RESOLVERS
+
+SCOPED_IPV6_REGEX="\[fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}\]"
+
+if [ "$RESOLVERS" = "" ]; then
+    echo "Warning: unable to determine DNS resolvers for nginx" >&2
+    unset RESOLVERS
+elif [[ $RESOLVERS =~ $SCOPED_IPV6_REGEX ]]; then
+    echo -n "Warning: Scoped IPv6 addresses removed from resolvers: " >&2
+    echo "$RESOLVERS" | grep -Eo "$SCOPED_IPV6_REGEX" | paste -s -d ' ' >&2
+    RESOLVERS=$(echo "$RESOLVERS" | sed -r "s/$SCOPED_IPV6_REGEX//g" | xargs echo -n); export RESOLVERS
fi

# If the user has run the default command and the socket doesn't exist, fail
-if [ "$socketMissing" = 1 -a "$1" = forego -a "$2" = start -a "$3" = '-r' ]; then
+if [ "$socketMissing" = 1 ] && [ "$1" = forego ] && [ "$2" = start ] && [ "$3" = '-r' ]; then
    exit 1
fi
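To see what this resolver handling produces on a given host, the same expressions can be run by hand (a sketch; the sample input in the last command is illustrative):

```shell
# Collect nameservers from resolv.conf, bracketing IPv6 addresses
awk '$1 == "nameserver" {print ($2 ~ ":")? "["$2"]": $2}' ORS=' ' /etc/resolv.conf | sed 's/ *$//g'

# Strip link-local (scoped) IPv6 resolvers the same way the entrypoint does
SCOPED_IPV6_REGEX="\[fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}\]"
echo "[fe80::1%eth0] 192.168.1.1" | sed -r "s/$SCOPED_IPV6_REGEX//g" | xargs echo -n
```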


@@ -1,7 +1,9 @@
#!/bin/bash -e

-# The first argument is the bit depth of the dhparam, or 2048 if unspecified
-DHPARAM_BITS=${1:-2048}
+# DHPARAM_BITS is the bit depth of the dhparam, or 4096 if unspecified
+DHPARAM_BITS=${DHPARAM_BITS:-4096}
+# DHPARAM_GENERATION=false skips dhparam generation
+DHPARAM_GENERATION=${DHPARAM_GENERATION:-true}

# If a dhparam file is not available, use the pre-generated one and generate a new one in the background.
# Note that /etc/nginx/dhparam is a volume, so this dhparam will persist restarts.

@@ -13,7 +15,7 @@ GEN_LOCKFILE="/tmp/dhparam_generating.lock"

PREGEN_HASH=$(md5sum $PREGEN_DHPARAM_FILE | cut -d" " -f1)
if [[ -f $DHPARAM_FILE ]]; then
    CURRENT_HASH=$(md5sum $DHPARAM_FILE | cut -d" " -f1)
-    if [[ $PREGEN_HASH != $CURRENT_HASH ]]; then
+    if [[ $PREGEN_HASH != "$CURRENT_HASH" ]]; then
        # There is already a dhparam, and it's not the default
        echo "Custom dhparam.pem file found, generation skipped"
        exit 0

@@ -25,6 +27,11 @@ if [[ -f $DHPARAM_FILE ]]; then
    fi
fi

+if [[ $DHPARAM_GENERATION =~ ^[Ff][Aa][Ll][Ss][Ee]$ ]]; then
+    echo "Skipping Diffie-Hellman parameters generation and Ignoring pre-generated dhparam.pem"
+    exit 0
+fi
+
cat >&2 <<-EOT
    WARNING: $DHPARAM_FILE was not found. A pre-generated dhparam.pem will be used for now while a new one
    is being generated in the background. Once the new dhparam.pem is in place, nginx will be reloaded.

@@ -37,9 +44,10 @@ touch $GEN_LOCKFILE

# Generate a new dhparam in the background in a low priority and reload nginx when finished (grep removes the progress indicator).
(
    (
-        nice -n +5 openssl dhparam -out $DHPARAM_FILE $DHPARAM_BITS 2>&1 \
+        nice -n +5 openssl dhparam -dsaparam -out $DHPARAM_FILE.tmp "$DHPARAM_BITS" 2>&1 \
+            && mv $DHPARAM_FILE.tmp $DHPARAM_FILE \
            && echo "dhparam generation complete, reloading nginx" \
            && nginx -s reload
    ) | grep -vE '^[\.+]+'
    rm $GEN_LOCKFILE
-) &disown
+) & disown
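The two knobs introduced here are plain environment variables, so they can be set directly on the proxy container, for example (a sketch):

```shell
# Skip Diffie-Hellman parameter generation entirely
docker run -d -p 80:80 -e DHPARAM_GENERATION=false \
  -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy

# Or generate a smaller 2048-bit dhparam instead of the 4096-bit default
docker run -d -p 80:80 -e DHPARAM_BITS=2048 \
  -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy
```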


@@ -1,5 +1,8 @@
{{ $CurrentContainer := where $ "ID" .Docker.CurrentContainerID | first }}

+{{ $external_http_port := coalesce $.Env.HTTP_PORT "80" }}
+{{ $external_https_port := coalesce $.Env.HTTPS_PORT "443" }}
+
{{ define "upstream" }}
{{ if .Address }}
{{/* If we got the containers from swarm and this container's port is published to host, use host IP:PORT */}}

@@ -19,7 +22,50 @@
server 127.0.0.1 down;
{{ end }}
{{ end }}
+{{ end }}
{{ define "ssl_policy" }}
{{ if eq .ssl_policy "Mozilla-Modern" }}
ssl_protocols TLSv1.3;
{{/* nginx currently lacks ability to choose ciphers in TLS 1.3 in configuration, see https://trac.nginx.org/nginx/ticket/1529 /*}}
{{/* a possible workaround can be modify /etc/ssl/openssl.cnf to change it globally (see https://trac.nginx.org/nginx/ticket/1529#comment:12 ) /*}}
{{/* explicitly set ngnix default value in order to allow single servers to override the global http value */}}
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers off;
{{ else if eq .ssl_policy "Mozilla-Intermediate" }}
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
ssl_prefer_server_ciphers off;
{{ else if eq .ssl_policy "Mozilla-Old" }}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA';
ssl_prefer_server_ciphers on;
{{ else if eq .ssl_policy "AWS-TLS-1-2-2017-01" }}
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES128-SHA256:AES256-GCM-SHA384:AES256-SHA256';
ssl_prefer_server_ciphers on;
{{ else if eq .ssl_policy "AWS-TLS-1-1-2017-01" }}
ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA';
ssl_prefer_server_ciphers on;
{{ else if eq .ssl_policy "AWS-2016-08" }}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA';
ssl_prefer_server_ciphers on;
{{ else if eq .ssl_policy "AWS-2015-05" }}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA:DES-CBC3-SHA';
ssl_prefer_server_ciphers on;
{{ else if eq .ssl_policy "AWS-2015-03" }}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA:DHE-DSS-AES128-SHA:DES-CBC3-SHA';
ssl_prefer_server_ciphers on;
{{ else if eq .ssl_policy "AWS-2015-02" }}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA:DHE-DSS-AES128-SHA';
ssl_prefer_server_ciphers on;
{{ end }}
{{ end }}

# If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the

@@ -51,8 +97,8 @@ server_names_hash_bucket_size 128;
ssl_dhparam /etc/nginx/dhparam/dhparam.pem;
{{ end }}

-# Set appropriate X-Forwarded-Ssl header
-map $scheme $proxy_x_forwarded_ssl {
+# Set appropriate X-Forwarded-Ssl header based on $proxy_x_forwarded_proto
+map $proxy_x_forwarded_proto $proxy_x_forwarded_ssl {
default off;
https on;
}
@@ -65,6 +111,10 @@ log_format vhost '$host $remote_addr - $remote_user [$time_local] '
access_log off;

+{{/* Get the SSL_POLICY defined by this container, falling back to "Mozilla-Intermediate" */}}
+{{ $ssl_policy := or ($.Env.SSL_POLICY) "Mozilla-Intermediate" }}
+{{ template "ssl_policy" (dict "ssl_policy" $ssl_policy) }}
+
{{ if $.Env.RESOLVERS }}
resolver {{ $.Env.RESOLVERS }};
{{ end }}
@@ -88,27 +138,30 @@ proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
proxy_set_header Proxy "";
{{ end }}

+{{ $access_log := (or (and (not $.Env.DISABLE_ACCESS_LOGS) "access_log /var/log/nginx/access.log vhost;") "") }}
+
{{ $enable_ipv6 := eq (or ($.Env.ENABLE_IPV6) "") "true" }}

server {
server_name _; # This is just an invalid value which will never trigger on a real hostname.
-listen 80;
+listen {{ $external_http_port }};
{{ if $enable_ipv6 }}
-listen [::]:80;
+listen [::]:{{ $external_http_port }};
{{ end }}
-access_log /var/log/nginx/access.log vhost;
+{{ $access_log }}
return 503;
}

{{ if (and (exists "/etc/nginx/certs/default.crt") (exists "/etc/nginx/certs/default.key")) }}
server {
server_name _; # This is just an invalid value which will never trigger on a real hostname.
-listen 443 ssl http2;
+listen {{ $external_https_port }} ssl http2;
{{ if $enable_ipv6 }}
-listen [::]:443 ssl http2;
+listen [::]:{{ $external_https_port }} ssl http2;
{{ end }}
-access_log /var/log/nginx/access.log vhost;
+{{ $access_log }}
return 503;

+ssl_session_cache shared:SSL:50m;
ssl_session_tickets off;
ssl_certificate /etc/nginx/certs/default.crt;
ssl_certificate_key /etc/nginx/certs/default.key;
@@ -119,7 +172,7 @@ server {
{{ $host := trim $host }}
{{ $is_regexp := hasPrefix "~" $host }}
-{{ $upstream_name := when $is_regexp (sha1 $host) $host }}
+{{ $upstream_name := (print (when $is_regexp (sha1 $host) $host) "-upstream") }}

# {{ $host }}
upstream {{ $upstream_name }} {

@@ -161,13 +214,13 @@ upstream {{ $upstream_name }} {
{{ $network_tag := or (first (groupByKeys $containers "Env.NETWORK_ACCESS")) "external" }}

{{/* Get the HTTPS_METHOD defined by containers w/ the same vhost, falling back to "redirect" */}}
-{{ $https_method := or (first (groupByKeys $containers "Env.HTTPS_METHOD")) "redirect" }}
+{{ $https_method := or (first (groupByKeys $containers "Env.HTTPS_METHOD")) (or $.Env.HTTPS_METHOD "redirect") }}

-{{/* Get the SSL_POLICY defined by containers w/ the same vhost, falling back to "Mozilla-Intermediate" */}}
-{{ $ssl_policy := or (first (groupByKeys $containers "Env.SSL_POLICY")) "Mozilla-Intermediate" }}
+{{/* Get the SSL_POLICY defined by containers w/ the same vhost, falling back to empty string (use default) */}}
+{{ $ssl_policy := or (first (groupByKeys $containers "Env.SSL_POLICY")) "" }}

{{/* Get the HSTS defined by containers w/ the same vhost, falling back to "max-age=31536000" */}}
-{{ $hsts := or (first (groupByKeys $containers "Env.HSTS")) "max-age=31536000" }}
+{{ $hsts := or (first (groupByKeys $containers "Env.HSTS")) (or $.Env.HSTS "max-age=31536000") }}

{{/* Get the VIRTUAL_ROOT By containers w/ use fastcgi root */}}
{{ $vhost_root := or (first (groupByKeys $containers "Env.VIRTUAL_ROOT")) "/var/www/public" }}
@@ -193,58 +246,43 @@ upstream {{ $upstream_name }} {

{{ if eq $https_method "redirect" }}
server {
server_name {{ $host }};
-listen 80 {{ $default_server }};
+listen {{ $external_http_port }} {{ $default_server }};
{{ if $enable_ipv6 }}
-listen [::]:80 {{ $default_server }};
+listen [::]:{{ $external_http_port }} {{ $default_server }};
{{ end }}
-access_log /var/log/nginx/access.log vhost;
-return 301 https://$host$request_uri;
+{{ $access_log }}
+
+# Do not HTTPS redirect Let'sEncrypt ACME challenge
+location ^~ /.well-known/acme-challenge/ {
+auth_basic off;
+auth_request off;
+allow all;
+root /usr/share/nginx/html;
+try_files $uri =404;
+break;
+}
+
+location / {
+return 301 https://$host$request_uri;
+}
}
{{ end }}

server {
server_name {{ $host }};
-listen 443 ssl http2 {{ $default_server }};
+listen {{ $external_https_port }} ssl http2 {{ $default_server }};
{{ if $enable_ipv6 }}
-listen [::]:443 ssl http2 {{ $default_server }};
+listen [::]:{{ $external_https_port }} ssl http2 {{ $default_server }};
{{ end }}
-access_log /var/log/nginx/access.log vhost;
+{{ $access_log }}

{{ if eq $network_tag "internal" }}
# Only allow traffic from internal clients
include /etc/nginx/network_internal.conf;
{{ end }}

-{{ if eq $ssl_policy "Mozilla-Modern" }}
+{{ template "ssl_policy" (dict "ssl_policy" $ssl_policy) }}
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256';
{{ else if eq $ssl_policy "Mozilla-Intermediate" }}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:!DSS';
{{ else if eq $ssl_policy "Mozilla-Old" }}
ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:DES-CBC3-SHA:HIGH:SEED:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!RSAPSK:!aDH:!aECDH:!EDH-DSS-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA:!SRP';
{{ else if eq $ssl_policy "AWS-TLS-1-2-2017-01" }}
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES128-SHA256:AES256-GCM-SHA384:AES256-SHA256';
{{ else if eq $ssl_policy "AWS-TLS-1-1-2017-01" }}
ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA';
{{ else if eq $ssl_policy "AWS-2016-08" }}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA';
{{ else if eq $ssl_policy "AWS-2015-05" }}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA:DES-CBC3-SHA';
{{ else if eq $ssl_policy "AWS-2015-03" }}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA:DHE-DSS-AES128-SHA:DES-CBC3-SHA';
{{ else if eq $ssl_policy "AWS-2015-02" }}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA:DHE-DSS-AES128-SHA';
{{ end }}
ssl_prefer_server_ciphers on;
ssl_session_timeout 5m; ssl_session_timeout 5m;
ssl_session_cache shared:SSL:50m; ssl_session_cache shared:SSL:50m;
ssl_session_tickets off; ssl_session_tickets off;
@ -262,7 +300,7 @@ server {
ssl_trusted_certificate {{ printf "/etc/nginx/certs/%s.chain.pem" $cert }}; ssl_trusted_certificate {{ printf "/etc/nginx/certs/%s.chain.pem" $cert }};
{{ end }} {{ end }}
{{ if (and (ne $https_method "noredirect") (ne $hsts "off")) }} {{ if (not (or (eq $https_method "noredirect") (eq $hsts "off"))) }}
add_header Strict-Transport-Security "{{ trim $hsts }}" always; add_header Strict-Transport-Security "{{ trim $hsts }}" always;
{{ end }} {{ end }}
@ -278,8 +316,10 @@ server {
uwsgi_pass {{ trim $proto }}://{{ trim $upstream_name }}; uwsgi_pass {{ trim $proto }}://{{ trim $upstream_name }};
{{ else if eq $proto "fastcgi" }} {{ else if eq $proto "fastcgi" }}
root {{ trim $vhost_root }}; root {{ trim $vhost_root }};
include fastcgi.conf; include fastcgi_params;
fastcgi_pass {{ trim $upstream_name }}; fastcgi_pass {{ trim $upstream_name }};
{{ else if eq $proto "grpc" }}
grpc_pass {{ trim $proto }}://{{ trim $upstream_name }};
{{ else if eq $proto "static" }} {{ else if eq $proto "static" }}
root {{ trim $vhost_root }}; root {{ trim $vhost_root }};
{{ else }} {{ else }}
@ -304,11 +344,11 @@ server {
server { server {
server_name {{ $host }}; server_name {{ $host }};
listen 80 {{ $default_server }}; listen {{ $external_http_port }} {{ $default_server }};
{{ if $enable_ipv6 }} {{ if $enable_ipv6 }}
listen [::]:80 {{ $default_server }}; listen [::]:80 {{ $default_server }};
{{ end }} {{ end }}
access_log /var/log/nginx/access.log vhost; {{ $access_log }}
{{ if eq $network_tag "internal" }} {{ if eq $network_tag "internal" }}
# Only allow traffic from internal clients # Only allow traffic from internal clients
@ -327,8 +367,10 @@ server {
uwsgi_pass {{ trim $proto }}://{{ trim $upstream_name }}; uwsgi_pass {{ trim $proto }}://{{ trim $upstream_name }};
{{ else if eq $proto "fastcgi" }} {{ else if eq $proto "fastcgi" }}
root {{ trim $vhost_root }}; root {{ trim $vhost_root }};
include fastcgi.conf; include fastcgi_params;
fastcgi_pass {{ trim $upstream_name }}; fastcgi_pass {{ trim $upstream_name }};
{{ else if eq $proto "grpc" }}
grpc_pass {{ trim $proto }}://{{ trim $upstream_name }};
{{ else if eq $proto "static" }} {{ else if eq $proto "static" }}
root {{ trim $vhost_root }}; root {{ trim $vhost_root }};
{{ else }} {{ else }}
@ -349,11 +391,11 @@ server {
{{ if (and (not $is_https) (exists "/etc/nginx/certs/default.crt") (exists "/etc/nginx/certs/default.key")) }} {{ if (and (not $is_https) (exists "/etc/nginx/certs/default.crt") (exists "/etc/nginx/certs/default.key")) }}
server { server {
server_name {{ $host }}; server_name {{ $host }};
listen 443 ssl http2 {{ $default_server }}; listen {{ $external_https_port }} ssl http2 {{ $default_server }};
{{ if $enable_ipv6 }} {{ if $enable_ipv6 }}
listen [::]:443 ssl http2 {{ $default_server }}; listen [::]:{{ $external_https_port }} ssl http2 {{ $default_server }};
{{ end }} {{ end }}
access_log /var/log/nginx/access.log vhost; {{ $access_log }}
return 500; return 500;
ssl_certificate /etc/nginx/certs/default.crt; ssl_certificate /etc/nginx/certs/default.crt;
@ -4,25 +4,20 @@ Nginx proxy test suite
Install requirements Install requirements
-------------------- --------------------
You need [python 2.7](https://www.python.org/) and [pip](https://pip.pypa.io/en/stable/installing/) installed. Then run the commands: You need [python 3.9](https://www.python.org/) and [pip](https://pip.pypa.io/en/stable/installing/) installed. Then run the commands:
requirements/build.sh
pip install -r requirements/python-requirements.txt pip install -r requirements/python-requirements.txt
If you can't install those requirements on your computer, you can alternatively use the _pytest.sh_ script which will run the tests from a Docker container which has those requirements.
Prepare the nginx-proxy test image Prepare the nginx-proxy test image
---------------------------------- ----------------------------------
docker build -t jwilder/nginx-proxy:test .. make build-nginx-proxy-test-debian
or if you want to test the alpine flavor: or if you want to test the alpine flavor:
docker build -t jwilder/nginx-proxy:test -f Dockerfile.alpine .. make build-nginx-proxy-test-alpine
make sure to tag that test image exactly `jwilder/nginx-proxy:test` or the test suite won't work.
Run the test suite Run the test suite
------------------ ------------------
@ -43,7 +38,7 @@ Run one single test module
Write a test module Write a test module
------------------- -------------------
This test suite uses [pytest](http://doc.pytest.org/en/latest/). The [conftest.py](conftest.py) file will be automatically loaded by pytest and will provide you with two useful pytest [fixtures](http://doc.pytest.org/en/latest/fixture.html#fixture): This test suite uses [pytest](http://doc.pytest.org/en/latest/). The [conftest.py](conftest.py) file will be automatically loaded by pytest and will provide you with two useful pytest [fixtures](https://docs.pytest.org/en/latest/explanation/fixtures.html):
- docker_compose - docker_compose
- nginxproxy - nginxproxy
@ -61,11 +56,11 @@ The fixture will run the _docker-compose_ command with the `-f` option to load t
In the case you are running pytest from within a docker container, the `docker_compose` fixture will make sure the container running pytest is attached to all docker networks. That way, your test will be able to reach any of them. In the case you are running pytest from within a docker container, the `docker_compose` fixture will make sure the container running pytest is attached to all docker networks. That way, your test will be able to reach any of them.
In your tests, you can use the `docker_compose` variable to query and command the docker daemon as it provides you with a [client from the docker python module](https://docker-py.readthedocs.io/en/2.0.2/client.html#client-reference). In your tests, you can use the `docker_compose` variable to query and command the docker daemon as it provides you with a [client from the docker python module](https://docker-py.readthedocs.io/en/4.4.4/client.html#client-reference).
Also this fixture alters the way the python interpreter resolves domain names to IP addresses in the following ways: Also this fixture alters the way the python interpreter resolves domain names to IP addresses in the following ways:
Any domain name containing the substring `nginx-proxy` will resolve to the IP address of the container that was created from the `jwilder/nginx-proxy:test` image. So all the following domain names will resolve to the nginx-proxy container in tests: Any domain name containing the substring `nginx-proxy` will resolve to the IP address of the container that was created from the `nginxproxy/nginx-proxy:test` image. So all the following domain names will resolve to the nginx-proxy container in tests:
- `nginx-proxy` - `nginx-proxy`
- `nginx-proxy.com` - `nginx-proxy.com`
- `www.nginx-proxy.com` - `www.nginx-proxy.com`
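To illustrate how the two fixtures are typically combined, here is a minimal, hypothetical test module; the `web` service, its `container_name`, the `web.nginx-proxy.tld` vhost and the `/port` endpoint are assumptions borrowed from the existing tests, not something this commit adds:

    def test_web_is_served_through_the_proxy(docker_compose, nginxproxy):
        # "web.nginx-proxy.tld" contains "nginx-proxy", so the patched resolver points it
        # at the nginx-proxy test container started from the module's docker-compose file
        r = nginxproxy.get("http://web.nginx-proxy.tld/port")
        assert r.status_code == 200
        assert "answer from port 81\n" in r.text

        # docker_compose doubles as a docker-py client, so containers can be inspected too
        # (assumes the compose file sets container_name: web)
        assert docker_compose.containers.get("web").status == "running"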
@ -99,9 +94,8 @@ Furthermore, the nginxproxy methods accept an additional keyword parameter: `ipv
### The web docker image ### The web docker image
When you ran the `requirements/build.sh` script earlier, you built a [`web`](requirements/README.md) docker image which is convenient for running a small web server in a container. This image can produce containers that listens on multiple ports at the same time. When you run the `make build-webserver` command, you built a [`web`](requirements/README.md) docker image which is convenient for running a small web server in a container. This image can produce containers that listens on multiple ports at the same time.
### Testing TLS ### Testing TLS
If you need to create server certificates, use the [`certs/create_server_certificate.sh`](certs/) script. Pytest will be able to validate any certificate issued from this script. If you need to create server certificates, use the [`certs/create_server_certificate.sh`](certs/) script. Pytest will be able to validate any certificate issued from this script.
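As a sketch of what such a TLS test can look like — assuming a `web.nginx-proxy.tld` certificate was generated with that script and mounted into the proxy's `/etc/nginx/certs`, which is not shown here:

    def test_https_is_served_with_a_valid_certificate(docker_compose, nginxproxy):
        # no verify=False needed: pytest trusts certificates issued by create_server_certificate.sh
        r = nginxproxy.get("https://web.nginx-proxy.tld/port", allow_redirects=False)
        assert r.status_code == 200
        assert "answer from port 81\n" in r.text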
@ -24,7 +24,7 @@ fi
# Create a nginx container (which conveniently provides the `openssl` command) # Create a nginx container (which conveniently provides the `openssl` command)
############################################################################### ###############################################################################
CONTAINER=$(docker run -d -v $DIR:/work -w /work -e SAN="$ALTERNATE_DOMAINS" nginx:1.13) CONTAINER=$(docker run -d -v $DIR:/work -w /work -e SAN="$ALTERNATE_DOMAINS" nginx:1.19.10)
# Configure openssl # Configure openssl
docker exec $CONTAINER bash -c ' docker exec $CONTAINER bash -c '
mkdir -p /ca/{certs,crl,private,newcerts} 2>/dev/null mkdir -p /ca/{certs,crl,private,newcerts} 2>/dev/null
@ -1,4 +1,3 @@
from __future__ import print_function
import contextlib import contextlib
import logging import logging
import os import os
@ -68,11 +67,11 @@ class requests_for_docker(object):
""" """
Return the nginx config file Return the nginx config file
""" """
nginx_proxy_containers = docker_client.containers.list(filters={"ancestor": "jwilder/nginx-proxy:test"}) nginx_proxy_containers = docker_client.containers.list(filters={"ancestor": "nginxproxy/nginx-proxy:test"})
if len(nginx_proxy_containers) > 1: if len(nginx_proxy_containers) > 1:
pytest.fail("Too many running jwilder/nginx-proxy:test containers", pytrace=False) pytest.fail("Too many running nginxproxy/nginx-proxy:test containers", pytrace=False)
elif len(nginx_proxy_containers) == 0: elif len(nginx_proxy_containers) == 0:
pytest.fail("No running jwilder/nginx-proxy:test container", pytrace=False) pytest.fail("No running nginxproxy/nginx-proxy:test container", pytrace=False)
return get_nginx_conf_from_container(nginx_proxy_containers[0]) return get_nginx_conf_from_container(nginx_proxy_containers[0])
def get(self, *args, **kwargs): def get(self, *args, **kwargs):
@ -133,7 +132,7 @@ def container_ip(container):
pytest.skip("This system does not support IPv6") pytest.skip("This system does not support IPv6")
ip = container_ipv6(container) ip = container_ipv6(container)
if ip == '': if ip == '':
pytest.skip("Container %s has no IPv6 address" % container.name) pytest.skip(f"Container {container.name} has no IPv6 address")
else: else:
return ip return ip
else: else:
@ -142,7 +141,7 @@ def container_ip(container):
return net_info["bridge"]["IPAddress"] return net_info["bridge"]["IPAddress"]
# not default bridge network, fallback on first network defined # not default bridge network, fallback on first network defined
network_name = net_info.keys()[0] network_name = list(net_info.keys())[0]
return net_info[network_name]["IPAddress"] return net_info[network_name]["IPAddress"]
@ -155,27 +154,27 @@ def container_ipv6(container):
return net_info["bridge"]["GlobalIPv6Address"] return net_info["bridge"]["GlobalIPv6Address"]
# not default bridge network, fallback on first network defined # not default bridge network, fallback on first network defined
network_name = net_info.keys()[0] network_name = list(net_info.keys())[0]
return net_info[network_name]["GlobalIPv6Address"] return net_info[network_name]["GlobalIPv6Address"]
def nginx_proxy_dns_resolver(domain_name): def nginx_proxy_dns_resolver(domain_name):
""" """
if "nginx-proxy" if found in host, return the ip address of the docker container if "nginx-proxy" if found in host, return the ip address of the docker container
issued from the docker image jwilder/nginx-proxy:test. issued from the docker image nginxproxy/nginx-proxy:test.
:return: IP or None :return: IP or None
""" """
log = logging.getLogger('DNS') log = logging.getLogger('DNS')
log.debug("nginx_proxy_dns_resolver(%r)" % domain_name) log.debug(f"nginx_proxy_dns_resolver({domain_name!r})")
if 'nginx-proxy' in domain_name: if 'nginx-proxy' in domain_name:
nginxproxy_containers = docker_client.containers.list(filters={"status": "running", "ancestor": "jwilder/nginx-proxy:test"}) nginxproxy_containers = docker_client.containers.list(filters={"status": "running", "ancestor": "nginxproxy/nginx-proxy:test"})
if len(nginxproxy_containers) == 0: if len(nginxproxy_containers) == 0:
log.warn("no container found from image jwilder/nginx-proxy:test while resolving %r", domain_name) log.warn(f"no container found from image nginxproxy/nginx-proxy:test while resolving {domain_name!r}")
return return
nginxproxy_container = nginxproxy_containers[0] nginxproxy_container = nginxproxy_containers[0]
ip = container_ip(nginxproxy_container) ip = container_ip(nginxproxy_container)
log.info("resolving domain name %r as IP address %s of nginx-proxy container %s" % (domain_name, ip, nginxproxy_container.name)) log.info(f"resolving domain name {domain_name!r} as IP address {ip} of nginx-proxy container {nginxproxy_container.name}")
return ip return ip
def docker_container_dns_resolver(domain_name): def docker_container_dns_resolver(domain_name):
@ -186,24 +185,24 @@ def docker_container_dns_resolver(domain_name):
:return: IP or None :return: IP or None
""" """
log = logging.getLogger('DNS') log = logging.getLogger('DNS')
log.debug("docker_container_dns_resolver(%r)" % domain_name) log.debug(f"docker_container_dns_resolver({domain_name!r})")
match = re.search('(^|.+\.)(?P<container>[^.]+)\.container\.docker$', domain_name) match = re.search(r'(^|.+\.)(?P<container>[^.]+)\.container\.docker$', domain_name)
if not match: if not match:
log.debug("%r does not match" % domain_name) log.debug(f"{domain_name!r} does not match")
return return
container_name = match.group('container') container_name = match.group('container')
log.debug("looking for container %r" % container_name) log.debug(f"looking for container {container_name!r}")
try: try:
container = docker_client.containers.get(container_name) container = docker_client.containers.get(container_name)
except docker.errors.NotFound: except docker.errors.NotFound:
log.warn("container named %r not found while resolving %r" % (container_name, domain_name)) log.warn(f"container named {container_name!r} not found while resolving {domain_name!r}")
return return
log.debug("container %r found (%s)" % (container.name, container.short_id)) log.debug(f"container {container.name!r} found ({container.short_id})")
ip = container_ip(container) ip = container_ip(container)
log.info("resolving domain name %r as IP address %s of container %s" % (domain_name, ip, container.name)) log.info(f"resolving domain name {domain_name!r} as IP address {ip} of container {container.name}")
return ip return ip
@ -211,12 +210,12 @@ def monkey_patch_urllib_dns_resolver():
""" """
Alter the behavior of the urllib DNS resolver so that any domain name Alter the behavior of the urllib DNS resolver so that any domain name
containing substring 'nginx-proxy' will resolve to the IP address containing substring 'nginx-proxy' will resolve to the IP address
of the container created from image 'jwilder/nginx-proxy:test'. of the container created from image 'nginxproxy/nginx-proxy:test'.
""" """
prv_getaddrinfo = socket.getaddrinfo prv_getaddrinfo = socket.getaddrinfo
dns_cache = {} dns_cache = {}
def new_getaddrinfo(*args): def new_getaddrinfo(*args):
logging.getLogger('DNS').debug("resolving domain name %s" % repr(args)) logging.getLogger('DNS').debug(f"resolving domain name {repr(args)}")
_args = list(args) _args = list(args)
# custom DNS resolvers # custom DNS resolvers
@ -244,7 +243,7 @@ def remove_all_containers():
for container in docker_client.containers.list(all=True): for container in docker_client.containers.list(all=True):
if I_AM_RUNNING_INSIDE_A_DOCKER_CONTAINER and container.id.startswith(socket.gethostname()): if I_AM_RUNNING_INSIDE_A_DOCKER_CONTAINER and container.id.startswith(socket.gethostname()):
continue # pytest is running within a Docker container, so we do not want to remove that particular container continue # pytest is running within a Docker container, so we do not want to remove that particular container
logging.info("removing container %s" % container.name) logging.info(f"removing container {container.name}")
container.remove(v=True, force=True) container.remove(v=True, force=True)
@ -253,40 +252,43 @@ def get_nginx_conf_from_container(container):
return the nginx /etc/nginx/conf.d/default.conf file content from a container return the nginx /etc/nginx/conf.d/default.conf file content from a container
""" """
import tarfile import tarfile
from cStringIO import StringIO from io import BytesIO
strm, stat = container.get_archive('/etc/nginx/conf.d/default.conf')
with tarfile.open(fileobj=StringIO(strm.read())) as tf: strm_generator, stat = container.get_archive('/etc/nginx/conf.d/default.conf')
strm_fileobj = BytesIO(b"".join(strm_generator))
with tarfile.open(fileobj=strm_fileobj) as tf:
conffile = tf.extractfile('default.conf') conffile = tf.extractfile('default.conf')
return conffile.read() return conffile.read()
def docker_compose_up(compose_file='docker-compose.yml'): def docker_compose_up(compose_file='docker-compose.yml'):
logging.info('docker-compose -f %s up -d' % compose_file) logging.info(f'docker-compose -f {compose_file} up -d')
try: try:
subprocess.check_output(shlex.split('docker-compose -f %s up -d' % compose_file), stderr=subprocess.STDOUT) subprocess.check_output(shlex.split(f'docker-compose -f {compose_file} up -d'), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, e: except subprocess.CalledProcessError as e:
pytest.fail("Error while runninng 'docker-compose -f %s up -d':\n%s" % (compose_file, e.output), pytrace=False) pytest.fail(f"Error while runninng 'docker-compose -f {compose_file} up -d':\n{e.output}", pytrace=False)
def docker_compose_down(compose_file='docker-compose.yml'): def docker_compose_down(compose_file='docker-compose.yml'):
logging.info('docker-compose -f %s down' % compose_file) logging.info(f'docker-compose -f {compose_file} down')
try: try:
subprocess.check_output(shlex.split('docker-compose -f %s down' % compose_file), stderr=subprocess.STDOUT) subprocess.check_output(shlex.split(f'docker-compose -f {compose_file} down'), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, e: except subprocess.CalledProcessError as e:
pytest.fail("Error while runninng 'docker-compose -f %s down':\n%s" % (compose_file, e.output), pytrace=False) pytest.fail(f"Error while runninng 'docker-compose -f {compose_file} down':\n{e.output}", pytrace=False)
def wait_for_nginxproxy_to_be_ready(): def wait_for_nginxproxy_to_be_ready():
""" """
If one (and only one) container started from image jwilder/nginx-proxy:test is found, If one (and only one) container started from image nginxproxy/nginx-proxy:test is found,
wait for its log to contain substring "Watching docker events" wait for its log to contain substring "Watching docker events"
""" """
containers = docker_client.containers.list(filters={"ancestor": "jwilder/nginx-proxy:test"}) containers = docker_client.containers.list(filters={"ancestor": "nginxproxy/nginx-proxy:test"})
if len(containers) != 1: if len(containers) != 1:
return return
container = containers[0] container = containers[0]
for line in container.logs(stream=True): for line in container.logs(stream=True):
if "Watching docker events" in line: if b"Watching docker events" in line:
logging.debug("nginx-proxy ready") logging.debug("nginx-proxy ready")
break break
@ -307,7 +309,7 @@ def find_docker_compose_file(request):
if docker_compose_file_module_variable is not None: if docker_compose_file_module_variable is not None:
docker_compose_file = os.path.join( test_module_dir, docker_compose_file_module_variable) docker_compose_file = os.path.join( test_module_dir, docker_compose_file_module_variable)
if not os.path.isfile(docker_compose_file): if not os.path.isfile(docker_compose_file):
raise ValueError("docker compose file %r could not be found. Check your test module `docker_compose_file` variable value." % docker_compose_file) raise ValueError(f"docker compose file {docker_compose_file!r} could not be found. Check your test module `docker_compose_file` variable value.")
else: else:
if os.path.isfile(yml_file): if os.path.isfile(yml_file):
docker_compose_file = yml_file docker_compose_file = yml_file
@ -319,7 +321,7 @@ def find_docker_compose_file(request):
if not os.path.isfile(docker_compose_file): if not os.path.isfile(docker_compose_file):
logging.error("Could not find any docker-compose file named either '{0}.yml', '{0}.yaml' or 'docker-compose.yml'".format(request.module.__name__)) logging.error("Could not find any docker-compose file named either '{0}.yml', '{0}.yaml' or 'docker-compose.yml'".format(request.module.__name__))
logging.debug("using docker compose file %s" % docker_compose_file) logging.debug(f"using docker compose file {docker_compose_file}")
return docker_compose_file return docker_compose_file
@ -333,15 +335,15 @@ def connect_to_network(network):
try: try:
my_container = docker_client.containers.get(socket.gethostname()) my_container = docker_client.containers.get(socket.gethostname())
except docker.errors.NotFound: except docker.errors.NotFound:
logging.warn("container %r not found" % socket.gethostname()) logging.warn(f"container {socket.gethostname()!r} not found")
return return
# figure out our container networks # figure out our container networks
my_networks = my_container.attrs["NetworkSettings"]["Networks"].keys() my_networks = list(my_container.attrs["NetworkSettings"]["Networks"].keys())
# make sure our container is connected to the nginx-proxy's network # make sure our container is connected to the nginx-proxy's network
if network not in my_networks: if network not in my_networks:
logging.info("Connecting to docker network: %s" % network.name) logging.info(f"Connecting to docker network: {network.name}")
network.connect(my_container) network.connect(my_container)
return network return network
@ -356,15 +358,15 @@ def disconnect_from_network(network=None):
try: try:
my_container = docker_client.containers.get(socket.gethostname()) my_container = docker_client.containers.get(socket.gethostname())
except docker.errors.NotFound: except docker.errors.NotFound:
logging.warn("container %r not found" % socket.gethostname()) logging.warn(f"container {socket.gethostname()!r} not found")
return return
# figure out our container networks # figure out our container networks
my_networks_names = my_container.attrs["NetworkSettings"]["Networks"].keys() my_networks_names = list(my_container.attrs["NetworkSettings"]["Networks"].keys())
# disconnect our container from the given network # disconnect our container from the given network
if network.name in my_networks_names: if network.name in my_networks_names:
logging.info("Disconnecting from network %s" % network.name) logging.info(f"Disconnecting from network {network.name}")
network.disconnect(my_container) network.disconnect(my_container)
@ -378,7 +380,7 @@ def connect_to_all_networks():
return [] return []
else: else:
# find the list of docker networks # find the list of docker networks
networks = filter(lambda network: len(network.containers) > 0 and network.name != 'bridge', docker_client.networks.list()) networks = [network for network in docker_client.networks.list() if len(network.containers) > 0 and network.name != 'bridge']
return [connect_to_network(network) for network in networks] return [connect_to_network(network) for network in networks]
@ -388,7 +390,7 @@ def connect_to_all_networks():
# #
############################################################################### ###############################################################################
@pytest.yield_fixture(scope="module") @pytest.fixture(scope="module")
def docker_compose(request): def docker_compose(request):
""" """
pytest fixture providing containers described in a docker compose file. After the tests, remove the created containers pytest fixture providing containers described in a docker compose file. After the tests, remove the created containers
@ -412,7 +414,7 @@ def docker_compose(request):
restore_urllib_dns_resolver(original_dns_resolver) restore_urllib_dns_resolver(original_dns_resolver)
@pytest.yield_fixture() @pytest.fixture()
def nginxproxy(): def nginxproxy():
""" """
Provides the `nginxproxy` object that can be used in the same way the requests module is: Provides the `nginxproxy` object that can be used in the same way the requests module is:
@ -439,7 +441,7 @@ def nginxproxy():
def pytest_runtest_logreport(report): def pytest_runtest_logreport(report):
if report.failed: if report.failed:
if isinstance(report.longrepr, ReprExceptionInfo): if isinstance(report.longrepr, ReprExceptionInfo):
test_containers = docker_client.containers.list(all=True, filters={"ancestor": "jwilder/nginx-proxy:test"}) test_containers = docker_client.containers.list(all=True, filters={"ancestor": "nginxproxy/nginx-proxy:test"})
for container in test_containers: for container in test_containers:
report.longrepr.addsection('nginx-proxy logs', container.logs()) report.longrepr.addsection('nginx-proxy logs', container.logs())
report.longrepr.addsection('nginx-proxy conf', get_nginx_conf_from_container(container)) report.longrepr.addsection('nginx-proxy conf', get_nginx_conf_from_container(container))
@ -456,7 +458,7 @@ def pytest_runtest_makereport(item, call):
def pytest_runtest_setup(item): def pytest_runtest_setup(item):
previousfailed = getattr(item.parent, "_previousfailed", None) previousfailed = getattr(item.parent, "_previousfailed", None)
if previousfailed is not None: if previousfailed is not None:
pytest.xfail("previous test failed (%s)" % previousfailed.name) pytest.xfail(f"previous test failed ({previousfailed.name})")
############################################################################### ###############################################################################
# #
@ -465,9 +467,9 @@ def pytest_runtest_setup(item):
############################################################################### ###############################################################################
try: try:
docker_client.images.get('jwilder/nginx-proxy:test') docker_client.images.get('nginxproxy/nginx-proxy:test')
except docker.errors.ImageNotFound: except docker.errors.ImageNotFound:
pytest.exit("The docker image 'jwilder/nginx-proxy:test' is missing") pytest.exit("The docker image 'nginxproxy/nginx-proxy:test' is missing")
if docker.__version__ != "2.1.0": if docker.__version__ != "4.4.4":
pytest.exit("This test suite is meant to work with the python docker module v2.1.0") pytest.exit("This test suite is meant to work with the python docker module v4.4.4")
@ -1,3 +1,5 @@
[pytest] [pytest]
# disable the creation of the `.cache` folders # disable the creation of the `.cache` folders
addopts = -p no:cacheprovider --ignore=requirements --ignore=certs -r s -v addopts = -p no:cacheprovider --ignore=requirements --ignore=certs -r s -v
markers =
incremental: mark a test as incremental.
@ -1,7 +1,4 @@
FROM python:2.7-alpine FROM python:3.9
# Note: we're using alpine because it has openssl 1.0.2, which we need for testing
RUN apk add --update bash openssl curl && rm -rf /var/cache/apk/*
COPY python-requirements.txt /requirements.txt COPY python-requirements.txt /requirements.txt
RUN pip install -r /requirements.txt RUN pip install -r /requirements.txt
@ -2,7 +2,7 @@ This directory contains resources to build Docker images tests depend on
# Build images # Build images
./build.sh make build-webserver
# python-requirements.txt # python-requirements.txt
@ -1,6 +0,0 @@
#!/bin/bash
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
docker build -t web $DIR/web
@ -1,5 +1,5 @@
backoff==1.3.2 backoff==1.10.0
docker-compose==1.11.2 docker-compose==1.28.5
docker==2.1.0 docker==4.4.4
pytest==3.0.5 pytest==6.2.2
requests==2.11.1 requests==2.25.1
@ -13,13 +13,13 @@ class Handler(http.server.SimpleHTTPRequestHandler):
if self.path == "/headers": if self.path == "/headers":
response_body += self.headers.as_string() response_body += self.headers.as_string()
elif self.path == "/port": elif self.path == "/port":
response_body += "answer from port %s\n" % PORT response_body += f"answer from port {PORT}\n"
elif re.match("/status/(\d+)", self.path): elif re.match("/status/(\d+)", self.path):
result = re.match("/status/(\d+)", self.path) result = re.match("/status/(\d+)", self.path)
response_code = int(result.group(1)) response_code = int(result.group(1))
response_body += "answer with response code %s\n" % response_code response_body += f"answer with response code {response_code}\n"
elif self.path == "/": elif self.path == "/":
response_body += "I'm %s\n" % os.environ['HOSTNAME'] response_body += f"I'm {os.environ['HOSTNAME']}\n"
else: else:
response_body += "No route for this path!\n" response_body += "No route for this path!\n"
response_code = 404 response_code = 404
@ -8,7 +8,7 @@ web:
reverseproxy: reverseproxy:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
container_name: reverseproxy container_name: reverseproxy
environment: environment:
DEBUG: "true" DEBUG: "true"
@ -12,7 +12,7 @@ script_dir = os.path.dirname(__file__)
pytestmark = pytest.mark.xfail() # TODO delete this marker once those issues are fixed pytestmark = pytest.mark.xfail() # TODO delete this marker once those issues are fixed
@pytest.yield_fixture(scope="module", autouse=True) @pytest.fixture(scope="module", autouse=True)
def certs(): def certs():
""" """
pytest fixture that provides cert and key files into the tmp_certs directory pytest fixture that provides cert and key files into the tmp_certs directory
@ -43,7 +43,7 @@ def test_http_web_is_301(docker_compose, nginxproxy):
def test_https_web_is_200(docker_compose, nginxproxy): def test_https_web_is_200(docker_compose, nginxproxy):
r = nginxproxy.get("https://web.nginx-proxy/port") r = nginxproxy.get("https://web.nginx-proxy/port")
assert r.status_code == 200 assert r.status_code == 200
assert 'answer from port 81\n' in r.text assert "answer from port 81\n" in r.text
@pytest.mark.incremental @pytest.mark.incremental
@ -6,7 +6,7 @@ Furthermore, if the nginx-proxy in such state is restarted, the nginx process wi
In the generated nginx config file, we can notice the presence of an empty `upstream {}` block. In the generated nginx config file, we can notice the presence of an empty `upstream {}` block.
This can be fixed by merging [PR-585](https://github.com/jwilder/nginx-proxy/pull/585). This can be fixed by merging [PR-585](https://github.com/nginx-proxy/nginx-proxy/pull/585).
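As a purely illustrative sketch (not part of this commit), a regression test for this could look for an upstream block that contains no server directive in the generated configuration; the exact whitespace of such a block is an assumption:

    import re

    def test_no_empty_upstream_block_is_generated(docker_compose, nginxproxy):
        # nginx refuses to start or reload when an upstream block has no server inside it
        conf = nginxproxy.get_conf().decode()
        assert re.search(r"upstream\s+[^{]*\{\s*\}", conf) is None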
## How to reproduce ## How to reproduce
@ -9,7 +9,7 @@ services:
container_name: reverseproxy container_name: reverseproxy
networks: networks:
- netA - netA
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
@ -16,7 +16,7 @@ web2:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/f00.sock:ro - /var/run/docker.sock:/f00.sock:ro
- ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -1,7 +1,7 @@
version: '2' version: '2'
services: services:
nginx-proxy: nginx-proxy:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -1,5 +1,5 @@
nginx-proxy: nginx-proxy:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -1,7 +1,7 @@
version: '2' version: '2'
services: services:
nginx-proxy: nginx-proxy:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -19,4 +19,4 @@ def test_custom_conf_does_not_apply_to_web2(docker_compose, nginxproxy):
assert "X-test" not in r.headers assert "X-test" not in r.headers
def test_custom_block_is_present_in_nginx_generated_conf(docker_compose, nginxproxy): def test_custom_block_is_present_in_nginx_generated_conf(docker_compose, nginxproxy):
assert "include /etc/nginx/vhost.d/web1.nginx-proxy.local_location;" in nginxproxy.get_conf() assert b"include /etc/nginx/vhost.d/web1.nginx-proxy.local_location;" in nginxproxy.get_conf()
@ -1,7 +1,7 @@
version: '2' version: '2'
services: services:
nginx-proxy: nginx-proxy:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -1,7 +1,7 @@
version: '2' version: '2'
services: services:
nginx-proxy: nginx-proxy:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -1,7 +1,7 @@
version: '2' version: '2'
services: services:
nginx-proxy: nginx-proxy:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -10,7 +10,7 @@ web1:
# WHEN nginx-proxy runs with DEFAULT_HOST set to web1.tld # WHEN nginx-proxy runs with DEFAULT_HOST set to web1.tld
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -4,23 +4,27 @@ import logging
import pytest import pytest
@pytest.yield_fixture(scope="module") @pytest.fixture(scope="module")
def nginx_tmpl(): def nginx_tmpl():
""" """
pytest fixture which extracts the the nginx config template from pytest fixture which extracts the the nginx config template from
the jwilder/nginx-proxy:test image the nginxproxy/nginx-proxy:test image
""" """
script_dir = os.path.dirname(__file__) script_dir = os.path.dirname(__file__)
logging.info("extracting nginx.tmpl from jwilder/nginx-proxy:test") logging.info("extracting nginx.tmpl from nginxproxy/nginx-proxy:test")
docker_client = docker.from_env() docker_client = docker.from_env()
print(docker_client.containers.run( print(
image='jwilder/nginx-proxy:test', docker_client.containers.run(
remove=True, image="nginxproxy/nginx-proxy:test",
volumes=['{current_dir}:{current_dir}'.format(current_dir=script_dir)], remove=True,
entrypoint='sh', volumes=["{current_dir}:{current_dir}".format(current_dir=script_dir)],
command='-xc "cp /app/nginx.tmpl {current_dir} && chmod 777 {current_dir}/nginx.tmpl"'.format( entrypoint="sh",
current_dir=script_dir), command='-xc "cp /app/nginx.tmpl {current_dir} && chmod 777 {current_dir}/nginx.tmpl"'.format(
stderr=True)) current_dir=script_dir
),
stderr=True,
)
)
yield yield
logging.info("removing nginx.tmpl") logging.info("removing nginx.tmpl")
os.remove(os.path.join(script_dir, "nginx.tmpl")) os.remove(os.path.join(script_dir, "nginx.tmpl"))
@ -35,4 +39,4 @@ def test_forwards_to_whoami(nginx_tmpl, docker_compose, nginxproxy):
r = nginxproxy.get("http://whoami.nginx.container.docker/") r = nginxproxy.get("http://whoami.nginx.container.docker/")
assert r.status_code == 200 assert r.status_code == 200
whoami_container = docker_compose.containers.get("whoami") whoami_container = docker_compose.containers.get("whoami")
assert r.text == "I'm %s\n" % whoami_container.id[:12] assert r.text == f"I'm {whoami_container.id[:12]}\n"
@ -3,47 +3,37 @@ import docker
import logging import logging
import pytest import pytest
import re import re
from distutils.version import LooseVersion
def versiontuple(v):
"""
>>> versiontuple("1.12.3")
(1, 12, 3)
>>> versiontuple("1.13.0")
(1, 13, 0)
>>> versiontuple("17.03.0-ce")
(17, 3, 0)
>>> versiontuple("17.03.0-ce") < (1, 13)
False
"""
return tuple(map(int, (v.split('-')[0].split("."))))
raw_version = docker.from_env().version()['Version'] raw_version = docker.from_env().version()["Version"]
pytestmark = pytest.mark.skipif( pytestmark = pytest.mark.skipif(
versiontuple(raw_version) < (1, 13), LooseVersion(raw_version) < LooseVersion("1.13"),
reason="Docker compose syntax v3 requires docker engine v1.13 or later (got %s)" % raw_version) reason="Docker compose syntax v3 requires docker engine v1.13 or later (got {raw_version})"
)
@pytest.yield_fixture(scope="module") @pytest.fixture(scope="module")
def nginx_tmpl(): def nginx_tmpl():
""" """
pytest fixture which extracts the the nginx config template from pytest fixture which extracts the the nginx config template from
the jwilder/nginx-proxy:test image the nginxproxy/nginx-proxy:test image
""" """
script_dir = os.path.dirname(__file__) script_dir = os.path.dirname(__file__)
logging.info("extracting nginx.tmpl from jwilder/nginx-proxy:test") logging.info("extracting nginx.tmpl from nginxproxy/nginx-proxy:test")
docker_client = docker.from_env() docker_client = docker.from_env()
print(docker_client.containers.run( print(
image='jwilder/nginx-proxy:test', docker_client.containers.run(
remove=True, image="nginxproxy/nginx-proxy:test",
volumes=['{current_dir}:{current_dir}'.format(current_dir=script_dir)], remove=True,
entrypoint='sh', volumes=["{current_dir}:{current_dir}".format(current_dir=script_dir)],
command='-xc "cp /app/nginx.tmpl {current_dir} && chmod 777 {current_dir}/nginx.tmpl"'.format( entrypoint="sh",
current_dir=script_dir), command='-xc "cp /app/nginx.tmpl {current_dir} && chmod 777 {current_dir}/nginx.tmpl"'.format(
stderr=True)) current_dir=script_dir
),
stderr=True,
)
)
yield yield
logging.info("removing nginx.tmpl") logging.info("removing nginx.tmpl")
os.remove(os.path.join(script_dir, "nginx.tmpl")) os.remove(os.path.join(script_dir, "nginx.tmpl"))
@ -58,9 +48,9 @@ def test_forwards_to_whoami(nginx_tmpl, docker_compose, nginxproxy):
r = nginxproxy.get("http://whoami.nginx.container.docker/") r = nginxproxy.get("http://whoami.nginx.container.docker/")
assert r.status_code == 200 assert r.status_code == 200
whoami_container = docker_compose.containers.get("whoami") whoami_container = docker_compose.containers.get("whoami")
assert r.text == "I'm %s\n" % whoami_container.id[:12] assert r.text == f"I'm {whoami_container.id[:12]}\n"
if __name__ == '__main__': if __name__ == "__main__":
import doctest import doctest
doctest.testmod() doctest.testmod()
@ -7,7 +7,7 @@ import pytest
from docker.errors import NotFound from docker.errors import NotFound
@pytest.yield_fixture() @pytest.fixture()
def web1(docker_compose): def web1(docker_compose):
""" """
pytest fixture creating a web container with `VIRTUAL_HOST=web1.nginx-proxy` listening on port 81. pytest fixture creating a web container with `VIRTUAL_HOST=web1.nginx-proxy` listening on port 81.
@ -1,5 +1,5 @@
nginxproxy: nginxproxy:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -8,7 +8,7 @@ web:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -8,7 +8,7 @@ web:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./certs/web.nginx-proxy.tld.crt:/etc/nginx/certs/web.nginx-proxy.tld.crt:ro - ./certs/web.nginx-proxy.tld.crt:/etc/nginx/certs/web.nginx-proxy.tld.crt:ro
@ -16,7 +16,7 @@ web2:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -8,7 +8,7 @@ web:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -6,7 +6,7 @@ networks:
services: services:
nginx-proxy: nginx-proxy:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -9,7 +9,7 @@ web:
VIRTUAL_PORT: 90 VIRTUAL_PORT: 90
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -8,7 +8,7 @@ web:
VIRTUAL_HOST: "web.nginx-proxy.tld" VIRTUAL_HOST: "web.nginx-proxy.tld"
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -8,7 +8,7 @@ web:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -16,7 +16,7 @@ web2:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -26,7 +26,7 @@ def assert_log_contains(expected_log_line):
""" """
sut_container = docker_client.containers.get("nginxproxy") sut_container = docker_client.containers.get("nginxproxy")
docker_logs = sut_container.logs(stdout=True, stderr=True, stream=False, follow=False) docker_logs = sut_container.logs(stdout=True, stderr=True, stream=False, follow=False)
assert expected_log_line in docker_logs assert bytes(expected_log_line, encoding="utf8") in docker_logs
def require_openssl(required_version): def require_openssl(required_version):
@ -42,7 +42,7 @@ def require_openssl(required_version):
""" """
def versiontuple(v): def versiontuple(v):
clean_v = re.sub("[^\d\.]", "", v) clean_v = re.sub(r"[^\d\.]", "", v)
return tuple(map(int, (clean_v.split(".")))) return tuple(map(int, (clean_v.split("."))))
try: try:
@ -52,10 +52,10 @@ def require_openssl(required_version):
else: else:
if not command_output: if not command_output:
raise Exception("Could not get openssl version") raise Exception("Could not get openssl version")
openssl_version = command_output.split()[1] openssl_version = str(command_output.split()[1])
return pytest.mark.skipif( return pytest.mark.skipif(
versiontuple(openssl_version) < versiontuple(required_version), versiontuple(openssl_version) < versiontuple(required_version),
reason="openssl v%s is less than required version %s" % (openssl_version, required_version)) reason=f"openssl v{openssl_version} is less than required version {required_version}")
############################################################################### ###############################################################################
@ -71,8 +71,8 @@ def test_dhparam_is_not_generated_if_present(docker_compose):
assert_log_contains("Custom dhparam.pem file found, generation skipped") assert_log_contains("Custom dhparam.pem file found, generation skipped")
# Make sure the dhparam in use is not the default, pre-generated one # Make sure the dhparam in use is not the default, pre-generated one
default_checksum = sut_container.exec_run("md5sum /app/dhparam.pem.default").split() default_checksum = sut_container.exec_run("md5sum /app/dhparam.pem.default").output.split()
current_checksum = sut_container.exec_run("md5sum /etc/nginx/dhparam/dhparam.pem").split() current_checksum = sut_container.exec_run("md5sum /etc/nginx/dhparam/dhparam.pem").output.split()
assert default_checksum[0] != current_checksum[0] assert default_checksum[0] != current_checksum[0]
@ -87,7 +87,7 @@ def test_web5_dhparam_is_used(docker_compose):
sut_container = docker_client.containers.get("nginxproxy") sut_container = docker_client.containers.get("nginxproxy")
assert sut_container.status == "running" assert sut_container.status == "running"
host = "%s:443" % sut_container.attrs["NetworkSettings"]["IPAddress"] host = f"{sut_container.attrs['NetworkSettings']['IPAddress']}:443"
r = subprocess.check_output( r = subprocess.check_output(
"echo '' | openssl s_client -connect %s -cipher 'EDH' | grep 'Server Temp Key'" % host, shell=True) f"echo '' | openssl s_client -connect {host} -cipher 'EDH' | grep 'Server Temp Key'", shell=True)
assert "Server Temp Key: DH, 2048 bits\n" == r assert b"Server Temp Key: X25519, 253 bits\n" == r
@ -8,7 +8,7 @@ web5:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
container_name: nginxproxy container_name: nginxproxy
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
@ -22,7 +22,7 @@ def assert_log_contains(expected_log_line):
""" """
sut_container = docker_client.containers.get("nginxproxy") sut_container = docker_client.containers.get("nginxproxy")
docker_logs = sut_container.logs(stdout=True, stderr=True, stream=False, follow=False) docker_logs = sut_container.logs(stdout=True, stderr=True, stream=False, follow=False)
assert expected_log_line in docker_logs assert bytes(expected_log_line, encoding="utf8") in docker_logs
############################################################################### ###############################################################################
@ -35,10 +35,10 @@ def test_dhparam_is_generated_if_missing(docker_compose):
sut_container = docker_client.containers.get("nginxproxy") sut_container = docker_client.containers.get("nginxproxy")
assert sut_container.status == "running" assert sut_container.status == "running"
assert_log_contains("Generating DH parameters") assert_log_contains("Generating DSA parameters")
assert_log_contains("dhparam generation complete, reloading nginx") assert_log_contains("dhparam generation complete, reloading nginx")
# Make sure the dhparam in use is not the default, pre-generated one # Make sure the dhparam in use is not the default, pre-generated one
default_checksum = sut_container.exec_run("md5sum /app/dhparam.pem.default").split() default_checksum = sut_container.exec_run("md5sum /app/dhparam.pem.default").output.split()
generated_checksum = sut_container.exec_run("md5sum /etc/nginx/dhparam/dhparam.pem").split() generated_checksum = sut_container.exec_run("md5sum /etc/nginx/dhparam/dhparam.pem").output.split()
assert default_checksum[0] != generated_checksum[0] assert default_checksum[0] != generated_checksum[0]
@ -1,5 +1,5 @@
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
container_name: nginxproxy container_name: nginxproxy
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
@ -8,7 +8,7 @@ def test_web1_HSTS_default(docker_compose, nginxproxy):
assert "max-age=31536000" == r.headers["Strict-Transport-Security"] assert "max-age=31536000" == r.headers["Strict-Transport-Security"]
# Regression test to ensure HSTS is enabled even when the upstream sends an error in response # Regression test to ensure HSTS is enabled even when the upstream sends an error in response
# Issue #1073 https://github.com/jwilder/nginx-proxy/pull/1073 # Issue #1073 https://github.com/nginx-proxy/nginx-proxy/pull/1073
def test_web1_HSTS_error(docker_compose, nginxproxy): def test_web1_HSTS_error(docker_compose, nginxproxy):
r = nginxproxy.get("https://web1.nginx-proxy.tld/status/500", allow_redirects=False) r = nginxproxy.get("https://web1.nginx-proxy.tld/status/500", allow_redirects=False)
assert "Strict-Transport-Security" in r.headers assert "Strict-Transport-Security" in r.headers
@ -24,3 +24,10 @@ def test_web3_HSTS_custom(docker_compose, nginxproxy):
assert "answer from port 81\n" in r.text assert "answer from port 81\n" in r.text
assert "Strict-Transport-Security" in r.headers assert "Strict-Transport-Security" in r.headers
assert "max-age=86400; includeSubDomains; preload" == r.headers["Strict-Transport-Security"] assert "max-age=86400; includeSubDomains; preload" == r.headers["Strict-Transport-Security"]
# Regression test for issue 1080
# https://github.com/nginx-proxy/nginx-proxy/issues/1080
def test_web4_HSTS_off_noredirect(docker_compose, nginxproxy):
r = nginxproxy.get("https://web4.nginx-proxy.tld/port", allow_redirects=False)
assert "answer from port 81\n" in r.text
assert "Strict-Transport-Security" not in r.headers
@ -24,8 +24,18 @@ web3:
VIRTUAL_HOST: "web3.nginx-proxy.tld" VIRTUAL_HOST: "web3.nginx-proxy.tld"
HSTS: "max-age=86400; includeSubDomains; preload" HSTS: "max-age=86400; includeSubDomains; preload"
web4:
image: web
expose:
- "81"
environment:
WEB_PORTS: "81"
VIRTUAL_HOST: "web4.nginx-proxy.tld"
HSTS: "off"
HTTPS_METHOD: "noredirect"
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -9,7 +9,7 @@ web2:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -9,7 +9,7 @@ web:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -9,7 +9,7 @@ web3:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
@ -3,21 +3,21 @@ import pytest
@pytest.mark.parametrize("subdomain", ["foo", "bar"]) @pytest.mark.parametrize("subdomain", ["foo", "bar"])
def test_web1_http_redirects_to_https(docker_compose, nginxproxy, subdomain): def test_web1_http_redirects_to_https(docker_compose, nginxproxy, subdomain):
r = nginxproxy.get("http://%s.nginx-proxy.tld/" % subdomain, allow_redirects=False) r = nginxproxy.get(f"http://{subdomain}.nginx-proxy.tld/", allow_redirects=False)
assert r.status_code == 301 assert r.status_code == 301
assert "Location" in r.headers assert "Location" in r.headers
assert "https://%s.nginx-proxy.tld/" % subdomain == r.headers['Location'] assert f"https://{subdomain}.nginx-proxy.tld/" == r.headers['Location']
@pytest.mark.parametrize("subdomain", ["foo", "bar"]) @pytest.mark.parametrize("subdomain", ["foo", "bar"])
def test_web1_https_is_forwarded(docker_compose, nginxproxy, subdomain): def test_web1_https_is_forwarded(docker_compose, nginxproxy, subdomain):
r = nginxproxy.get("https://%s.nginx-proxy.tld/port" % subdomain, allow_redirects=False) r = nginxproxy.get(f"https://{subdomain}.nginx-proxy.tld/port", allow_redirects=False)
assert r.status_code == 200 assert r.status_code == 200
assert "answer from port 81\n" in r.text assert "answer from port 81\n" in r.text
@pytest.mark.parametrize("subdomain", ["foo", "bar"]) @pytest.mark.parametrize("subdomain", ["foo", "bar"])
def test_web1_HSTS_policy_is_active(docker_compose, nginxproxy, subdomain): def test_web1_HSTS_policy_is_active(docker_compose, nginxproxy, subdomain):
r = nginxproxy.get("https://%s.nginx-proxy.tld/port" % subdomain, allow_redirects=False) r = nginxproxy.get(f"https://{subdomain}.nginx-proxy.tld/port", allow_redirects=False)
assert "answer from port 81\n" in r.text assert "answer from port 81\n" in r.text
assert "Strict-Transport-Security" in r.headers assert "Strict-Transport-Security" in r.headers
View file
@@ -7,7 +7,7 @@ web1:
VIRTUAL_HOST: "*.nginx-proxy.tld" VIRTUAL_HOST: "*.nginx-proxy.tld"
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
View file
@@ -3,10 +3,11 @@ version: "3"
services: services:
proxy: proxy:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./certs:/etc/nginx/certs:ro - ./certs:/etc/nginx/certs:ro
      - ../../lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro
web1: web1:
image: web image: web
@@ -30,4 +31,4 @@ services:
environment: environment:
WEB_PORTS: "83" WEB_PORTS: "83"
VIRTUAL_HOST: "3.web.nginx-proxy.tld" VIRTUAL_HOST: "3.web.nginx-proxy.tld"
HTTPS_METHOD: nohttps HTTPS_METHOD: nohttps
View file
@@ -1,5 +1,5 @@
import pytest import pytest
from backports.ssl_match_hostname import CertificateError from ssl import CertificateError
from requests.exceptions import SSLError from requests.exceptions import SSLError
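This import swap removes the external backports.ssl_match_hostname dependency: on Python 3 the same exception class is available from the standard library ssl module (since Python 3.7 it is an alias of ssl.SSLCertVerificationError). A minimal sanity check:

    import ssl

    # CertificateError ships with the standard library on Python 3,
    # so the backport package is no longer needed.
    assert issubclass(ssl.CertificateError, Exception)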
@@ -9,18 +9,19 @@ from requests.exceptions import SSLError
(3, False), (3, False),
]) ])
def test_http_redirects_to_https(docker_compose, nginxproxy, subdomain, should_redirect_to_https): def test_http_redirects_to_https(docker_compose, nginxproxy, subdomain, should_redirect_to_https):
r = nginxproxy.get("http://%s.web.nginx-proxy.tld/port" % subdomain) r = nginxproxy.get(f"http://{subdomain}.web.nginx-proxy.tld/port")
if should_redirect_to_https: if should_redirect_to_https:
        assert len(r.history) > 0
assert r.history[0].is_redirect assert r.history[0].is_redirect
assert r.history[0].headers.get("Location") == "https://%s.web.nginx-proxy.tld/port" % subdomain assert r.history[0].headers.get("Location") == f"https://{subdomain}.web.nginx-proxy.tld/port"
assert "answer from port 8%s\n" % subdomain == r.text assert f"answer from port 8{subdomain}\n" == r.text
@pytest.mark.parametrize("subdomain", [1, 2]) @pytest.mark.parametrize("subdomain", [1, 2])
def test_https_get_served(docker_compose, nginxproxy, subdomain): def test_https_get_served(docker_compose, nginxproxy, subdomain):
r = nginxproxy.get("https://%s.web.nginx-proxy.tld/port" % subdomain, allow_redirects=False) r = nginxproxy.get(f"https://{subdomain}.web.nginx-proxy.tld/port", allow_redirects=False)
assert r.status_code == 200 assert r.status_code == 200
assert "answer from port 8%s\n" % subdomain == r.text assert f"answer from port 8{subdomain}\n" == r.text
def test_web3_https_is_500_and_SSL_validation_fails(docker_compose, nginxproxy): def test_web3_https_is_500_and_SSL_validation_fails(docker_compose, nginxproxy):
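The added len(r.history) > 0 assertion makes a missing redirect fail with a readable assertion error instead of an IndexError on r.history[0], since requests leaves Response.history as an empty list when no redirect occurred. A hypothetical helper capturing the same pattern:

    import requests

    def assert_redirected(r: requests.Response, expected_location: str) -> None:
        # history is empty when the response was served directly, so check the
        # length first to get a clear failure rather than an IndexError.
        assert len(r.history) > 0
        assert r.history[0].is_redirect
        assert r.history[0].headers.get("Location") == expected_location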
View file
@@ -18,9 +18,9 @@ import pytest
("web4.whatever.nginx-proxy.regexp", 84), ("web4.whatever.nginx-proxy.regexp", 84),
]) ])
def test_wildcard_prefix(docker_compose, nginxproxy, host, expected_port): def test_wildcard_prefix(docker_compose, nginxproxy, host, expected_port):
r = nginxproxy.get("http://%s/port" % host) r = nginxproxy.get(f"http://{host}/port")
assert r.status_code == 200 assert r.status_code == 200
assert r.text == "answer from port %s\n" % expected_port assert r.text == f"answer from port {expected_port}\n"
@pytest.mark.parametrize("host", [ @pytest.mark.parametrize("host", [
@@ -28,5 +28,5 @@ def test_wildcard_prefix(docker_compose, nginxproxy, host, expected_port):
"web4.whatever.nginx-proxy.regexp-to-infinity-and-beyond" "web4.whatever.nginx-proxy.regexp-to-infinity-and-beyond"
]) ])
def test_non_matching_host_is_503(docker_compose, nginxproxy, host): def test_non_matching_host_is_503(docker_compose, nginxproxy, host):
r = nginxproxy.get("http://%s/port" % host) r = nginxproxy.get(f"http://{host}/port")
assert r.status_code == 503, r.text assert r.status_code == 503, r.text
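The ...-to-infinity-and-beyond host falling through to the 503 catch-all suggests the regexp VIRTUAL_HOST used for web4 is anchored; the actual pattern lives in the compose file for this test and is not shown in this hunk. A hypothetical anchored pattern with that behaviour (nginx-proxy treats VIRTUAL_HOST values starting with "~" as regular expressions):

    import re

    # Hypothetical pattern, for illustration only.
    pattern = re.compile(r"^web4\..*\.nginx-proxy\.regexp$")
    assert pattern.match("web4.whatever.nginx-proxy.regexp")
    assert not pattern.match("web4.whatever.nginx-proxy.regexp-to-infinity-and-beyond")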
View file
@@ -32,7 +32,7 @@ web4:
sut: sut:
image: jwilder/nginx-proxy:test image: nginxproxy/nginx-proxy:test
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro - ./lib/ssl/dhparam.pem:/etc/nginx/dhparam/dhparam.pem:ro