init commit (52ce719bc1)
@@ -0,0 +1,135 @@

# docker-compose tags #

PRODUCT=onlyoffice
REPO=${PRODUCT}
INSTALLATION_TYPE=COMMUNITY
STATUS=""
DOCKER_IMAGE_PREFIX=${STATUS}docspace
DOCKER_TAG=2.0.3
CONTAINER_PREFIX=${PRODUCT}-
MYSQL_VERSION=8.0.32
MYSQL_IMAGE=mysql:${MYSQL_VERSION}
ELK_VERSION=7.16.3
SERVICE_PORT=5050
DOCUMENT_SERVER_IMAGE_NAME=<CHANGE_ME> # onlyoffice/documentserver-unlim:7.5.1.1
DOCKERFILE=Dockerfile.app
APP_DOTNET_ENV=""
EXTERNAL_PORT=80

# zookeeper #
# ZOO_PORT=2181
# ZOO_HOST=${CONTAINER_PREFIX}zookeeper
# ZOO_SERVER=server.1=${ZOO_HOST}:2888:3888

# kafka #
# KAFKA_HOST=${CONTAINER_PREFIX}kafka
# KAFKA_ADVERTISED_LISTENERS=LISTENER_DOCKER_INTERNAL://${KAFKA_HOST}:9092
# KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT
# KAFKA_INTER_BROKER_LISTENER_NAME=LISTENER_DOCKER_INTERNAL
# KAFKA_ZOOKEEPER_CONNECT=${ZOO_HOST}:2181
# KAFKA_BROKER_ID=1
# KAFKA_LOG4J_LOGGERS=kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO
# KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1

# elasticsearch #
ELK_CONTAINER_NAME=${CONTAINER_PREFIX}elasticsearch
ELK_SHEME=http
ELK_HOST=""
ELK_PORT=9200

# app service environment #
ENV_EXTENSION=none
APP_CORE_BASE_DOMAIN=localhost
# APP_URL_PORTAL=http://onlyoffice-router:8092
APP_URL_PORTAL=<CHANGE_ME> # Example: https://office.example.com
OAUTH_REDIRECT_URL="https://service.onlyoffice.com/oauth2.aspx"
WRONG_PORTAL_NAME_URL=""
LOG_LEVEL="Warning"
DEBUG_INFO="false"

APP_KNOWN_PROXIES=""
APP_KNOWN_NETWORKS=""
APP_CORE_MACHINEKEY=<CHANGE_ME> # CHANGE Example: cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 12

CERTIFICATE_PATH=""
CERTIFICATE_KEY_PATH=""
DHPARAM_PATH=""

# docs #
DOCUMENT_CONTAINER_NAME=${CONTAINER_PREFIX}document-server
DOCUMENT_SERVER_URL_EXTERNAL=<CHANGE_ME> # CHANGE Example: "https://docs.example.com"
DOCUMENT_SERVER_JWT_SECRET=<CHANGE_ME> # CHANGE Example: cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
DOCUMENT_SERVER_JWT_HEADER=AuthorizationJwt
DOCUMENT_SERVER_URL_PUBLIC=/ds-vpath/

# redis #
REDIS_CONTAINER_NAME=${CONTAINER_PREFIX}redis
REDIS_HOST=""
REDIS_PORT=6379
REDIS_USER_NAME=""
REDIS_PASSWORD=""

# rabbitmq #
RABBIT_CONTAINER_NAME=${CONTAINER_PREFIX}rabbitmq
RABBIT_HOST=""
RABBIT_PORT=5672
RABBIT_VIRTUAL_HOST=/
RABBIT_USER_NAME=guest
RABBIT_PASSWORD=guest

# mysql #
MYSQL_CONTAINER_NAME=${CONTAINER_PREFIX}mysql-server
MYSQL_HOST=""
MYSQL_PORT=3306
MYSQL_ROOT_PASSWORD=<CHANGE_ME> # CHANGE Example: cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 20
MYSQL_DATABASE=docspace
MYSQL_USER=${PRODUCT}_user
MYSQL_PASSWORD=<CHANGE_ME> # CHANGE Example: cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 20
DATABASE_MIGRATION=true
MIGRATION_TYPE="SAAS"

# service host #
API_SYSTEM_HOST=${CONTAINER_PREFIX}api-system
BACKUP_HOST=${CONTAINER_PREFIX}backup
BACKUP_BACKGRUOND_TASKS_HOST=${CONTAINER_PREFIX}backup-background-tasks
CLEAR_EVENTS_HOST=${CONTAINER_PREFIX}clear-events
FILES_HOST=${CONTAINER_PREFIX}files
FILES_SERVICES_HOST=${CONTAINER_PREFIX}files-services
STORAGE_MIGRATION_HOST=${CONTAINER_PREFIX}storage-migration
NOTIFY_HOST=${CONTAINER_PREFIX}notify
PEOPLE_SERVER_HOST=${CONTAINER_PREFIX}people-server
SOCKET_HOST=${CONTAINER_PREFIX}socket
STUDIO_NOTIFY_HOST=${CONTAINER_PREFIX}studio-notify
API_HOST=${CONTAINER_PREFIX}api
STUDIO_HOST=${CONTAINER_PREFIX}studio
SSOAUTH_HOST=${CONTAINER_PREFIX}ssoauth
TELEGRAMREPORTS_HOST=${CONTAINER_PREFIX}telegramreports
MIGRATION_RUNNER_HOST=${CONTAINER_PREFIX}migration-runner
PROXY_HOST=${CONTAINER_PREFIX}proxy
ROUTER_HOST=${CONTAINER_PREFIX}router
DOCEDITOR_HOST=${CONTAINER_PREFIX}doceditor
LOGIN_HOST=${CONTAINER_PREFIX}login
HELTHCHECKS_HOST=${CONTAINER_PREFIX}healthchecks

# router upstream environment #
SERVICE_API_SYSTEM=${API_SYSTEM_HOST}:${SERVICE_PORT}
SERVICE_BACKUP=${BACKUP_HOST}:${SERVICE_PORT}
SERVICE_BACKUP_BACKGRUOND_TASKS=${BACKUP_BACKGRUOND_TASKS_HOST}:${SERVICE_PORT}
SERVICE_CLEAR_EVENTS=${CLEAR_EVENTS_HOST}:${SERVICE_PORT}
SERVICE_FILES=${FILES_HOST}:${SERVICE_PORT}
SERVICE_FILES_SERVICES=${FILES_SERVICES_HOST}:${SERVICE_PORT}
SERVICE_STORAGE_MIGRATION=${STORAGE_MIGRATION_HOST}:${SERVICE_PORT}
SERVICE_NOTIFY=${NOTIFY_HOST}:${SERVICE_PORT}
SERVICE_PEOPLE_SERVER=${PEOPLE_SERVER_HOST}:${SERVICE_PORT}
SERVICE_SOCKET=${SOCKET_HOST}:${SERVICE_PORT}
SERVICE_STUDIO_NOTIFY=${STUDIO_NOTIFY_HOST}:${SERVICE_PORT}
SERVICE_API=${API_HOST}:${SERVICE_PORT}
SERVICE_STUDIO=${STUDIO_HOST}:${SERVICE_PORT}
SERVICE_SSOAUTH=${SSOAUTH_HOST}:${SERVICE_PORT}
SERVICE_TELEGRAMREPORTS=${TELEGRAMREPORTS_HOST}:${SERVICE_PORT}
SERVICE_DOCEDITOR=${DOCEDITOR_HOST}:5013
SERVICE_LOGIN=${LOGIN_HOST}:5011
SERVICE_HELTHCHECKS=${HELTHCHECKS_HOST}:${SERVICE_PORT}

NETWORK_NAME=onlyoffice

COMPOSE_IGNORE_ORPHANS=True
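# Illustrative check (not part of the file above): docker compose interpolates
# these variables into the yml files, so after editing the <CHANGE_ME> values
# the rendered configuration can be reviewed with, for example:
#
#   docker compose --env-file .env -f docspace.yml config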
@@ -0,0 +1,30 @@

## ONLYOFFICE DocSpace

Installation script:

[ONLYOFFICE DocSpace Community](https://www.onlyoffice.com/download-docspace.aspx?from=downloadintegrationmenu#docspace-community)

Plugins for DocSpace:

[ONLYOFFICE DocSpace plugins](https://github.com/ONLYOFFICE/docspace-plugins)

[Building plugin](https://api.onlyoffice.com/docspace/pluginssdk/buildingplugin)


#### Replace in the `.env` file:

    DOCUMENT_SERVER_IMAGE_NAME=<CHANGE_ME>   # onlyoffice/documentserver-unlim:7.5.1.1
    APP_URL_PORTAL=<CHANGE_ME>               # Example: https://office.example.com
    APP_CORE_MACHINEKEY=<CHANGE_ME>          # Example: cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 12
    DOCUMENT_SERVER_URL_EXTERNAL=<CHANGE_ME> # Example: "https://docs.example.com"
    DOCUMENT_SERVER_JWT_SECRET=<CHANGE_ME>   # Example: cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 32
    MYSQL_ROOT_PASSWORD=<CHANGE_ME>          # Example: cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 20
    MYSQL_PASSWORD=<CHANGE_ME>               # Example: cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 20
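A minimal sketch of one way to fill these values in from the shell, assuming GNU sed and that the placeholders above are still literally `<CHANGE_ME>`; the URLs and generated secrets are illustrative only:

```
# generate the secrets with the same pipelines suggested above
APP_CORE_MACHINEKEY=$(cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 12)
DOCUMENT_SERVER_JWT_SECRET=$(cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 32)
MYSQL_ROOT_PASSWORD=$(cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 20)
MYSQL_PASSWORD=$(cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 20)

# substitute the placeholders in .env
sed -i \
  -e "s~^\(DOCUMENT_SERVER_IMAGE_NAME=\).*~\1onlyoffice/documentserver-unlim:7.5.1.1~" \
  -e "s~^\(APP_URL_PORTAL=\).*~\1https://office.example.com~" \
  -e "s~^\(APP_CORE_MACHINEKEY=\).*~\1${APP_CORE_MACHINEKEY}~" \
  -e "s~^\(DOCUMENT_SERVER_URL_EXTERNAL=\).*~\1\"https://docs.example.com\"~" \
  -e "s~^\(DOCUMENT_SERVER_JWT_SECRET=\).*~\1${DOCUMENT_SERVER_JWT_SECRET}~" \
  -e "s~^\(MYSQL_ROOT_PASSWORD=\).*~\1${MYSQL_ROOT_PASSWORD}~" \
  -e "s~^\(MYSQL_PASSWORD=\).*~\1${MYSQL_PASSWORD}~" \
  .env
```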

#### Launch:

```
git clone https://git.badms.ru/bms/docspace
cd docspace
docker compose up -d
```
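After start-up, one quick way to confirm the stack came up is to check container state and the published port; this is only a sketch and assumes the defaults from `.env` (the proxy published on port 80):

```
docker compose ps                 # services should reach "running (healthy)"
curl -I http://localhost:80       # the proxy listens on ${EXTERNAL_PORT}, 80 by default
```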
@@ -0,0 +1,4 @@

CREATE DATABASE IF NOT EXISTS `DB_NAME` CHARACTER SET utf8 COLLATE 'utf8_general_ci';
use `DB_NAME`;
set @@global.max_allowed_packet = 104857600;
set @@global.group_concat_max_len = 20971520;
@@ -0,0 +1,33 @@

#!/bin/bash

set -e

PRODUCT="docspace"
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DOCKERCOMPOSE=$(dirname "$DIR")

if [ -f "${DOCKERCOMPOSE}/docspace.yml" ]; then
  :
elif [ -f "/app/onlyoffice/${PRODUCT}.yml" ]; then
  DOCKERCOMPOSE="/app/onlyoffice"
else
  echo "Error: yml files not found." && exit 1
fi

FILES=("${PRODUCT}" "notify" "healthchecks" "proxy" "ds" "rabbitmq" "redis" "elasticsearch" "db")

LOG_DIR="${DOCKERCOMPOSE}/logs"
mkdir -p ${LOG_DIR}

echo "Collecting ${PRODUCT} logs into the ${LOG_DIR} directory..."
for FILE in "${FILES[@]}"; do
  SERVICE_NAMES=($(docker-compose -f ${DOCKERCOMPOSE}/${FILE}.yml config --services))
  for SERVICE_NAME in "${SERVICE_NAMES[@]}"; do
    if [[ $(docker-compose -f ${DOCKERCOMPOSE}/${FILE}.yml ps -q ${SERVICE_NAME} | wc -l) -eq 1 ]]; then
      docker-compose -f ${DOCKERCOMPOSE}/${FILE}.yml logs ${SERVICE_NAME} > ${LOG_DIR}/${SERVICE_NAME}.log
    else
      echo "The ${SERVICE_NAME} service is not running"
    fi
  done
done
echo "OK"
@@ -0,0 +1,151 @@

#!/bin/bash

set -e

PRODUCT="docspace"
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DOCKERCOMPOSE=$(dirname "$DIR")
LETSENCRYPT="/etc/letsencrypt/live";
DHPARAM_FILE="/etc/ssl/certs/dhparam.pem"
WEBROOT_PATH="/letsencrypt"

# Check if configuration files are present
if [ -f "/app/onlyoffice/.env" -a -f "/app/onlyoffice/proxy.yml" -a -f "/app/onlyoffice/proxy-ssl.yml" ]; then
  DOCKERCOMPOSE="/app/onlyoffice"
  DIR="/app/onlyoffice/config"
elif [ -f "${DOCKERCOMPOSE}/.env" -a -f "${DOCKERCOMPOSE}/proxy.yml" -a -f "${DOCKERCOMPOSE}/proxy-ssl.yml" ]; then
  :
else
  echo "Error: configuration files not found." && exit 1
fi

help(){
  echo ""
  echo "This script automatically sets up SSL certificates for DocSpace."
  echo "Automatically obtain Let's Encrypt SSL certificates:"
  echo "  docspace-ssl-setup EMAIL DOMAIN"
  echo "    EMAIL        Email used for registration and recovery contact."
  echo "                 Use a comma to register multiple emails, e.g.:"
  echo "                 u1@example.com,u2@example.com."
  echo "    DOMAIN       Domain name to apply."
  echo ""
  echo "Use your own certificates via the -f or --file parameter:"
  echo "  docspace-ssl-setup --file DOMAIN CERTIFICATE PRIVATEKEY"
  echo "    DOMAIN       Domain name to apply."
  echo "    CERTIFICATE  Path to the certificate file for the domain."
  echo "    PRIVATEKEY   Path to the private key file for the certificate."
  echo ""
  echo "Return to the default proxy configuration using the -d or --default parameter:"
  echo "  docspace-ssl-setup --default"
  echo ""
  exit 0
}

case $1 in
  -f | --file )
    if [ -n "$2" ] && [ -n "$3" ] && [ -n "$4" ]; then
      echo "Using the specified files to configure SSL..."
      DOMAIN=$2
      CERTIFICATE_FILE=$3
      PRIVATEKEY_FILE=$4
    else
      help
    fi
  ;;

  -d | --default )
    echo "Returning to the default proxy configuration..."
    if [ -z "$(awk -F '=' '/^\s*DOCUMENT_SERVER_URL_EXTERNAL/{gsub(/^[[:space:]]*"|"[[:space:]]*$/, "", $2); print $2}' ${DOCKERCOMPOSE}/.env)" ]; then
      sed "s#\(APP_URL_PORTAL=\).*#\1\"http://onlyoffice-router:8092\"#g" -i ${DOCKERCOMPOSE}/.env
    else
      sed "s#\(APP_URL_PORTAL=\).*#\1\"http://$(curl -s ifconfig.me)\"#g" -i ${DOCKERCOMPOSE}/.env
    fi

    [[ -f "${DIR}/${PRODUCT}-renew-letsencrypt" ]] && rm -rf "${DIR}/${PRODUCT}-renew-letsencrypt"

    if docker ps -f "name=onlyoffice-proxy" --format '{{.Names}}' | grep -q "onlyoffice-proxy"; then
      if docker ps -f "name=onlyoffice-proxy" --format "{{.Ports}}" | grep -q "443"; then
        docker-compose -f ${DOCKERCOMPOSE}/proxy-ssl.yml down
      fi
    fi

    docker-compose -f ${DOCKERCOMPOSE}/proxy.yml up -d
    docker-compose -f ${DOCKERCOMPOSE}/docspace.yml restart onlyoffice-files

    echo "OK"
    exit 0
  ;;

  * )
    if [ "$#" -ge "2" ]; then
      MAIL=$1
      DOMAIN=$2
      LETSENCRYPT_ENABLE="true"

      if ! docker volume inspect "onlyoffice_webroot_path" &> /dev/null; then
        echo "Error: missing webroot_path volume" && exit 1
      fi

      if ! docker ps -f "name=onlyoffice-proxy" --format '{{.Names}}' | grep -q "onlyoffice-proxy"; then
        echo "Error: the proxy container is not running" && exit 1
      fi

      echo "Generating Let's Encrypt SSL certificates..."

      # Request and generate Let's Encrypt SSL certificate
      docker run -it --rm \
        -v /etc/letsencrypt:/etc/letsencrypt \
        -v /var/lib/letsencrypt:/var/lib/letsencrypt \
        -v /var/log:/var/log \
        -v onlyoffice_webroot_path:${WEBROOT_PATH} \
        certbot/certbot certonly \
        --expand --webroot -w ${WEBROOT_PATH} \
        --cert-name ${PRODUCT} --non-interactive --agree-tos --email ${MAIL} -d ${DOMAIN}
    else
      help
    fi
  ;;
esac

[[ ! -f "${DHPARAM_FILE}" ]] && openssl dhparam -out ${DHPARAM_FILE} 2048
CERTIFICATE_FILE="${CERTIFICATE_FILE:-"${LETSENCRYPT}/${PRODUCT}/fullchain.pem"}"
PRIVATEKEY_FILE="${PRIVATEKEY_FILE:-"${LETSENCRYPT}/${PRODUCT}/privkey.pem"}"

if [ -f "${CERTIFICATE_FILE}" ]; then
  if [ -f "${PRIVATEKEY_FILE}" ]; then
    docker-compose -f ${DOCKERCOMPOSE}/proxy.yml down
    docker-compose -f ${DOCKERCOMPOSE}/docspace.yml stop onlyoffice-files

    sed -i "s~\(APP_URL_PORTAL=\).*~\1\"https://${DOMAIN}\"~g" ${DOCKERCOMPOSE}/.env
    sed -i "s~\(CERTIFICATE_PATH=\).*~\1\"${CERTIFICATE_FILE}\"~g" ${DOCKERCOMPOSE}/.env
    sed -i "s~\(CERTIFICATE_KEY_PATH=\).*~\1\"${PRIVATEKEY_FILE}\"~g" ${DOCKERCOMPOSE}/.env
    sed -i "s~\(DHPARAM_PATH=\).*~\1\"${DHPARAM_FILE}\"~g" ${DOCKERCOMPOSE}/.env

    if [[ "${LETSENCRYPT_ENABLE}" = "true" ]]; then
      # Create and set permissions for docspace-renew-letsencrypt
      echo '#!/bin/bash' > ${DIR}/${PRODUCT}-renew-letsencrypt
      echo "docker-compose -f ${DOCKERCOMPOSE}/proxy-ssl.yml down" >> ${DIR}/${PRODUCT}-renew-letsencrypt
      echo 'docker run -it --rm \' >> ${DIR}/${PRODUCT}-renew-letsencrypt
      echo ' -v /etc/letsencrypt:/etc/letsencrypt \' >> ${DIR}/${PRODUCT}-renew-letsencrypt
      echo ' -v /var/lib/letsencrypt:/var/lib/letsencrypt \' >> ${DIR}/${PRODUCT}-renew-letsencrypt
      echo ' certbot/certbot renew' >> ${DIR}/${PRODUCT}-renew-letsencrypt
      echo "docker-compose -f ${DOCKERCOMPOSE}/proxy-ssl.yml up -d" >> ${DIR}/${PRODUCT}-renew-letsencrypt

      chmod a+x ${DIR}/${PRODUCT}-renew-letsencrypt

      # Add cron job if /etc/cron.d directory exists
      if [ -d /etc/cron.d ]; then
        echo -e "@weekly root ${DIR}/${PRODUCT}-renew-letsencrypt" | tee /etc/cron.d/${PRODUCT}-letsencrypt
      fi
    fi

    docker-compose -f ${DOCKERCOMPOSE}/proxy-ssl.yml up -d
    docker-compose -f ${DOCKERCOMPOSE}/docspace.yml up -d onlyoffice-files

    echo "OK"
  else
    echo "Error: private key file at path ${PRIVATEKEY_FILE} not found." && exit 1
  fi
else
  echo "Error: certificate file at path ${CERTIFICATE_FILE} not found." && exit 1
fi
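# Illustrative usage of the SSL setup script above (not part of the file); the
# domain, email, and certificate paths are placeholders taken from its help text:
#
#   Let's Encrypt mode:
#     sudo bash docspace-ssl-setup u1@example.com office.example.com
#
#   Own-certificate mode:
#     sudo bash docspace-ssl-setup --file office.example.com /path/to/fullchain.pem /path/to/privkey.pem
#
#   Revert to the plain HTTP proxy configuration:
#     sudo bash docspace-ssl-setup --default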
@@ -0,0 +1,187 @@

#!/bin/bash

#
# (c) Copyright Ascensio System SIA 2021
#
# This program is a free software product. You can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License (AGPL)
# version 3 as published by the Free Software Foundation. In accordance with
# Section 7(a) of the GNU AGPL its Section 15 shall be amended to the effect
# that Ascensio System SIA expressly excludes the warranty of non-infringement
# of any third-party rights.
#
# This program is distributed WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For
# details, see the GNU AGPL at: http://www.gnu.org/licenses/agpl-3.0.html
#
# You can contact Ascensio System SIA at 20A-12 Ernesta Birznieka-Upisha
# street, Riga, Latvia, EU, LV-1050.
#
# The interactive user interfaces in modified source and object code versions
# of the Program must display Appropriate Legal Notices, as required under
# Section 5 of the GNU AGPL version 3.
#
# Pursuant to Section 7(b) of the License you must retain the original Product
# logo when distributing the program. Pursuant to Section 7(e) we decline to
# grant you any rights under trademark law for use of our trademarks.
#
# All the Product's GUI elements, including illustrations and icon sets, as
# well as technical writing content are licensed under the terms of the
# Creative Commons Attribution-ShareAlike 4.0 International. See the License
# terms at http://creativecommons.org/licenses/by-sa/4.0/legalcode
#

PARAMETERS="$PARAMETERS -it COMMUNITY";
DOCKER="";
LOCAL_SCRIPTS="false"
product="docspace"
FILE_NAME="$(basename "$0")"

while [ "$1" != "" ]; do
  case $1 in
    -ls | --localscripts )
      if [ "$2" == "true" ] || [ "$2" == "false" ]; then
        PARAMETERS="$PARAMETERS ${1}";
        LOCAL_SCRIPTS=$2
        shift
      fi
    ;;

    -gb | --gitbranch )
      if [ "$2" != "" ]; then
        PARAMETERS="$PARAMETERS ${1}";
        GIT_BRANCH=$2
        shift
      fi
    ;;

    docker )
      DOCKER="true";
      shift && continue
    ;;

    package )
      DOCKER="false";
      shift && continue
    ;;

    "-?" | -h | --help )
      if [ -z "$DOCKER" ]; then
        echo "Run 'bash $FILE_NAME docker' to install the Docker version of the application or 'bash $FILE_NAME package' to install the deb/rpm version."
        echo "Run 'bash $FILE_NAME docker -h' or 'bash $FILE_NAME package -h' to get more details."
        exit 0;
      fi
      PARAMETERS="$PARAMETERS -ht $FILE_NAME";
    ;;
  esac
  PARAMETERS="$PARAMETERS ${1}";
  shift
done

root_checking () {
  if [ ! $( id -u ) -eq 0 ]; then
    echo "To perform this action you must be logged in with root rights"
    exit 1;
  fi
}

command_exists () {
  type "$1" &> /dev/null;
}

install_curl () {
  if command_exists apt-get; then
    apt-get -y update
    apt-get -y -q install curl
  elif command_exists yum; then
    yum -y install curl
  fi

  if ! command_exists curl; then
    echo "command curl not found"
    exit 1;
  fi
}

read_installation_method () {
  echo "Select 'Y' to install ONLYOFFICE $product using Docker (recommended). Select 'N' to install it using RPM/DEB packages.";
  read -p "Install with Docker [Y/N/C]? " choice
  case "$choice" in
    y|Y )
      DOCKER="true";
    ;;

    n|N )
      DOCKER="false";
    ;;

    c|C )
      exit 0;
    ;;

    * )
      echo "Please enter Y, N, or C to cancel";
    ;;
  esac

  if [ "$DOCKER" == "" ]; then
    read_installation_method;
  fi
}

root_checking

if ! command_exists curl ; then
  install_curl;
fi

if [ -z "$DOCKER" ]; then
  read_installation_method;
fi

if [ -z $GIT_BRANCH ]; then
  DOWNLOAD_URL_PREFIX="https://download.onlyoffice.com/${product}"
else
  DOWNLOAD_URL_PREFIX="https://raw.githubusercontent.com/ONLYOFFICE/${product}-buildtools/${GIT_BRANCH}/install/OneClickInstall"
fi

if [ "$DOCKER" == "true" ]; then
  if [ "$LOCAL_SCRIPTS" == "true" ]; then
    bash install-Docker.sh ${PARAMETERS}
  else
    curl -s -O ${DOWNLOAD_URL_PREFIX}/install-Docker.sh
    bash install-Docker.sh ${PARAMETERS}
    rm install-Docker.sh
  fi
else
  if [ -f /etc/redhat-release ] ; then
    DIST=$(cat /etc/redhat-release | sed s/\ release.*//);
    REV=$(cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//);

    REV_PARTS=(${REV//\./ });
    REV=${REV_PARTS[0]};

    if [[ "${DIST}" == CentOS* ]] && [ ${REV} -lt 7 ]; then
      echo "CentOS 7 or later is required";
      exit 1;
    fi
    if [ "$LOCAL_SCRIPTS" == "true" ]; then
      bash install-RedHat.sh ${PARAMETERS}
    else
      curl -s -O ${DOWNLOAD_URL_PREFIX}/install-RedHat.sh
      bash install-RedHat.sh ${PARAMETERS}
      rm install-RedHat.sh
    fi
  elif [ -f /etc/debian_version ] ; then
    if [ "$LOCAL_SCRIPTS" == "true" ]; then
      bash install-Debian.sh ${PARAMETERS}
    else
      curl -s -O ${DOWNLOAD_URL_PREFIX}/install-Debian.sh
      bash install-Debian.sh ${PARAMETERS}
      rm install-Debian.sh
    fi
  else
    echo "Unsupported OS";
    exit 1;
  fi
fi
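# Illustrative invocation of the installer above (not part of the file); the
# saved filename is an assumption, since this diff does not show it:
#
#   sudo bash docspace-install.sh docker     # Docker-based installation
#   sudo bash docspace-install.sh package    # deb/rpm package installation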
File diff suppressed because it is too large
@@ -0,0 +1,5 @@

[mysqld]
sql_mode = 'NO_ENGINE_SUBSTITUTION'
max_connections = 1000
max_allowed_packet = 1048576000
group_concat_max_len = 2048
@@ -0,0 +1,67 @@

#!/bin/sh
# vim:sw=4:ts=4:et

set -e

entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

ME=$(basename $0)
DEFAULT_CONF_FILE="etc/nginx/conf.d/default.conf"

# check if we have ipv6 available
if [ ! -f "/proc/net/if_inet6" ]; then
    entrypoint_log "$ME: info: ipv6 not available"
    exit 0
fi

if [ ! -f "/$DEFAULT_CONF_FILE" ]; then
    entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE is not a file or does not exist"
    exit 0
fi

# check if the file can be modified, e.g. not on a r/o filesystem
touch /$DEFAULT_CONF_FILE 2>/dev/null || { entrypoint_log "$ME: info: can not modify /$DEFAULT_CONF_FILE (read-only file system?)"; exit 0; }

# check if the file is already modified, e.g. on a container restart
grep -q "listen \[::]\:80;" /$DEFAULT_CONF_FILE && { entrypoint_log "$ME: info: IPv6 listen already enabled"; exit 0; }

if [ -f "/etc/os-release" ]; then
    . /etc/os-release
else
    entrypoint_log "$ME: info: can not guess the operating system"
    exit 0
fi

entrypoint_log "$ME: info: Getting the checksum of /$DEFAULT_CONF_FILE"

case "$ID" in
    "debian")
        CHECKSUM=$(dpkg-query --show --showformat='${Conffiles}\n' nginx | grep $DEFAULT_CONF_FILE | cut -d' ' -f 3)
        echo "$CHECKSUM  /$DEFAULT_CONF_FILE" | md5sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    "alpine")
        CHECKSUM=$(apk manifest nginx 2>/dev/null| grep $DEFAULT_CONF_FILE | cut -d' ' -f 1 | cut -d ':' -f 2)
        echo "$CHECKSUM  /$DEFAULT_CONF_FILE" | sha1sum -c - >/dev/null 2>&1 || {
            entrypoint_log "$ME: info: /$DEFAULT_CONF_FILE differs from the packaged version"
            exit 0
        }
        ;;
    *)
        entrypoint_log "$ME: info: Unsupported distribution"
        exit 0
        ;;
esac

# enable ipv6 on default.conf listen sockets
sed -i -E 's,listen 80;,listen 80;\n listen [::]:80;,' /$DEFAULT_CONF_FILE

entrypoint_log "$ME: info: Enabled listen on IPv6 in /$DEFAULT_CONF_FILE"

exit 0
@@ -0,0 +1,11 @@

#!/bin/sh
# vim:sw=2:ts=2:sts=2:et

set -eu

LC_ALL=C
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

[ "${NGINX_ENTRYPOINT_LOCAL_RESOLVERS:-}" ] || return 0

export NGINX_LOCAL_RESOLVERS=$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print $2}' /etc/resolv.conf)
@@ -0,0 +1,78 @@

#!/bin/sh

set -e

ME=$(basename $0)

entrypoint_log() {
  if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
    echo "$@"
  fi
}

add_stream_block() {
  local conffile="/etc/nginx/nginx.conf"

  if grep -q -E "\s*stream\s*\{" "$conffile"; then
    entrypoint_log "$ME: $conffile contains a stream block; include $stream_output_dir/*.conf to enable stream templates"
  else
    # check if the file can be modified, e.g. not on a r/o filesystem
    touch "$conffile" 2>/dev/null || { entrypoint_log "$ME: info: can not modify $conffile (read-only file system?)"; exit 0; }
    entrypoint_log "$ME: Appending stream block to $conffile to include $stream_output_dir/*.conf"
    cat << END >> "$conffile"
# added by "$ME" on "$(date)"
stream {
    include $stream_output_dir/*.conf;
}
END
  fi
}

auto_envsubst() {
  local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
  local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
  local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
  local stream_suffix="${NGINX_ENVSUBST_STREAM_TEMPLATE_SUFFIX:-.stream-template}"
  local stream_output_dir="${NGINX_ENVSUBST_STREAM_OUTPUT_DIR:-/etc/nginx/stream-conf.d}"
  local filter="${NGINX_ENVSUBST_FILTER:-}"

  local template defined_envs relative_path output_path subdir
  defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
  [ -d "$template_dir" ] || return 0
  if [ ! -w "$output_dir" ]; then
    entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
    return 0
  fi
  find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
    relative_path="${template#$template_dir/}"
    output_path="$output_dir/${relative_path%$suffix}"
    subdir=$(dirname "$relative_path")
    # create a subdirectory where the template file exists
    mkdir -p "$output_dir/$subdir"
    entrypoint_log "$ME: Running envsubst on $template to $output_path"
    envsubst "$defined_envs" < "$template" > "$output_path"
  done

  # Print the first file with the stream suffix, this will be false if there are none
  if test -n "$(find "$template_dir" -name "*$stream_suffix" -print -quit)"; then
    mkdir -p "$stream_output_dir"
    if [ ! -w "$stream_output_dir" ]; then
      entrypoint_log "$ME: ERROR: $template_dir exists, but $stream_output_dir is not writable"
      return 0
    fi
    add_stream_block
    find "$template_dir" -follow -type f -name "*$stream_suffix" -print | while read -r template; do
      relative_path="${template#$template_dir/}"
      output_path="$stream_output_dir/${relative_path%$stream_suffix}"
      subdir=$(dirname "$relative_path")
      # create a subdirectory where the template file exists
      mkdir -p "$stream_output_dir/$subdir"
      entrypoint_log "$ME: Running envsubst on $template to $output_path"
      envsubst "$defined_envs" < "$template" > "$output_path"
    done
  fi
}

auto_envsubst

exit 0
@@ -0,0 +1,188 @@

#!/bin/sh
# vim:sw=2:ts=2:sts=2:et

set -eu

LC_ALL=C
ME=$( basename "$0" )
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

[ "${NGINX_ENTRYPOINT_WORKER_PROCESSES_AUTOTUNE:-}" ] || exit 0

touch /etc/nginx/nginx.conf 2>/dev/null || { echo >&2 "$ME: error: can not modify /etc/nginx/nginx.conf (read-only file system?)"; exit 0; }

ceildiv() {
  num=$1
  div=$2
  echo $(( (num + div - 1) / div ))
}

get_cpuset() {
  cpusetroot=$1
  cpusetfile=$2
  ncpu=0
  [ -f "$cpusetroot/$cpusetfile" ] || return 1
  for token in $( tr ',' ' ' < "$cpusetroot/$cpusetfile" ); do
    case "$token" in
      *-*)
        count=$( seq $(echo "$token" | tr '-' ' ') | wc -l )
        ncpu=$(( ncpu+count ))
        ;;
      *)
        ncpu=$(( ncpu+1 ))
        ;;
    esac
  done
  echo "$ncpu"
}

get_quota() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.cfs_quota_us" ] || return 1
  [ -f "$cpuroot/cpu.cfs_period_us" ] || return 1
  cfs_quota=$( cat "$cpuroot/cpu.cfs_quota_us" )
  cfs_period=$( cat "$cpuroot/cpu.cfs_period_us" )
  [ "$cfs_quota" = "-1" ] && return 1
  [ "$cfs_period" = "0" ] && return 1
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}

get_quota_v2() {
  cpuroot=$1
  ncpu=0
  [ -f "$cpuroot/cpu.max" ] || return 1
  cfs_quota=$( cut -d' ' -f 1 < "$cpuroot/cpu.max" )
  cfs_period=$( cut -d' ' -f 2 < "$cpuroot/cpu.max" )
  [ "$cfs_quota" = "max" ] && return 1
  [ "$cfs_period" = "0" ] && return 1
  ncpu=$( ceildiv "$cfs_quota" "$cfs_period" )
  [ "$ncpu" -gt 0 ] || return 1
  echo "$ncpu"
}

get_cgroup_v1_path() {
  needle=$1
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  while IFS= read -r line; do
    case "$needle" in
      "cpuset")
        case "$line" in
          *cpuset*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$line" in
          *cpuset*)
            ;;
          *cpu,cpuacct*|*cpuacct,cpu|*cpuacct*|*cpu*)
            found=$( echo "$line" | cut -d ' ' -f 4,5 )
            break
            ;;
        esac
    esac
  done << __EOF__
$( grep -F -- '- cgroup ' /proc/self/mountinfo )
__EOF__

  while IFS= read -r line; do
    controller=$( echo "$line" | cut -d: -f 2 )
    case "$needle" in
      "cpuset")
        case "$controller" in
          cpuset)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
      "cpu")
        case "$controller" in
          cpu,cpuacct|cpuacct,cpu|cpuacct|cpu)
            mountpoint=$( echo "$line" | cut -d: -f 3 )
            break
            ;;
        esac
        ;;
    esac
  done << __EOF__
$( grep -F -- 'cpu' /proc/self/cgroup )
__EOF__

  case "${found%% *}" in
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint")
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}

get_cgroup_v2_path() {
  found=
  foundroot=
  mountpoint=

  [ -r "/proc/self/mountinfo" ] || return 1
  [ -r "/proc/self/cgroup" ] || return 1

  while IFS= read -r line; do
    found=$( echo "$line" | cut -d ' ' -f 4,5 )
  done << __EOF__
$( grep -F -- '- cgroup2 ' /proc/self/mountinfo )
__EOF__

  while IFS= read -r line; do
    mountpoint=$( echo "$line" | cut -d: -f 3 )
  done << __EOF__
$( grep -F -- '0::' /proc/self/cgroup )
__EOF__

  case "${found%% *}" in
    "")
      return 1
      ;;
    "/")
      foundroot="${found##* }$mountpoint"
      ;;
    "$mountpoint" | /../*)
      foundroot="${found##* }"
      ;;
  esac
  echo "$foundroot"
}

ncpu_online=$( getconf _NPROCESSORS_ONLN )
ncpu_cpuset=
ncpu_quota=
ncpu_cpuset_v2=
ncpu_quota_v2=

cpuset=$( get_cgroup_v1_path "cpuset" ) && ncpu_cpuset=$( get_cpuset "$cpuset" "cpuset.effective_cpus" ) || ncpu_cpuset=$ncpu_online
cpu=$( get_cgroup_v1_path "cpu" ) && ncpu_quota=$( get_quota "$cpu" ) || ncpu_quota=$ncpu_online
cgroup_v2=$( get_cgroup_v2_path ) && ncpu_cpuset_v2=$( get_cpuset "$cgroup_v2" "cpuset.cpus.effective" ) || ncpu_cpuset_v2=$ncpu_online
cgroup_v2=$( get_cgroup_v2_path ) && ncpu_quota_v2=$( get_quota_v2 "$cgroup_v2" ) || ncpu_quota_v2=$ncpu_online

ncpu=$( printf "%s\n%s\n%s\n%s\n%s\n" \
        "$ncpu_online" \
        "$ncpu_cpuset" \
        "$ncpu_quota" \
        "$ncpu_cpuset_v2" \
        "$ncpu_quota_v2" \
        | sort -n \
        | head -n 1 )

sed -i.bak -r 's/^(worker_processes)(.*)$/# Commented out by '"$ME"' on '"$(date)"'\n#\1\2\n\1 '"$ncpu"';/' /etc/nginx/nginx.conf
@@ -0,0 +1,47 @@

#!/bin/sh
# vim:sw=4:ts=4:et

set -e

entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
    entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"

    entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
    find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
        case "$f" in
            *.envsh)
                if [ -x "$f" ]; then
                    entrypoint_log "$0: Sourcing $f";
                    . "$f"
                else
                    # warn on shell scripts without exec bit
                    entrypoint_log "$0: Ignoring $f, not executable";
                fi
                ;;
            *.sh)
                if [ -x "$f" ]; then
                    entrypoint_log "$0: Launching $f";
                    "$f"
                else
                    # warn on shell scripts without exec bit
                    entrypoint_log "$0: Ignoring $f, not executable";
                fi
                ;;
            *) entrypoint_log "$0: Ignoring $f";;
        esac
    done

    entrypoint_log "$0: Configuration complete; ready for start up"
else
    entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
fi

exec "$@"
@@ -0,0 +1,4 @@

location ~ /.well-known/acme-challenge {
  root "/letsencrypt";
  allow all;
}
@@ -0,0 +1,76 @@

proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header Host $the_host;
proxy_set_header X-Forwarded-Host $the_host;
proxy_set_header X-Forwarded-Proto $the_scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

## HTTP host
server {
  listen 0.0.0.0:80;
  listen [::]:80 default_server;
  server_name _;

  ## Redirects all traffic to the HTTPS host
  root /nowhere; ## root doesn't have to be a valid path since we are redirecting
  rewrite ^ https://$host$request_uri? permanent;
}

server {
  listen 127.0.0.1:80;
  listen [::1]:80;
  server_name localhost;

  client_max_body_size 4G;

  location / {
    proxy_pass http://$router_host:8092;
  }
}

## HTTPS host
server {
  listen 0.0.0.0:443 ssl;
  listen [::]:443 ssl default_server;
  root /usr/share/nginx/html;

  client_max_body_size 4G;

  ## Strong SSL Security
  ## https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
  ssl_certificate /usr/local/share/ca-certificates/tls.crt;
  ssl_certificate_key /etc/ssl/private/tls.key;
  # Uncomment the line below and specify the path to the password file if you use an encrypted certificate key
  # ssl_password_file $ssl_password_path;
  ssl_verify_client off;

  ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";

  ssl_protocols TLSv1.2;
  ssl_session_cache builtin:1000 shared:SSL:10m;

  ssl_prefer_server_ciphers on;

  add_header Strict-Transport-Security max-age=31536000;
  # add_header X-Frame-Options SAMEORIGIN;
  add_header X-Content-Type-Options nosniff;

  ## [Optional] If your certificate has OCSP, enable OCSP stapling to reduce the overhead and latency of running SSL.
  ## Replace with your ssl_trusted_certificate. For more info see:
  ## - https://medium.com/devops-programming/4445f4862461
  ## - https://www.ruby-forum.com/topic/4419319
  ## - https://www.digitalocean.com/community/tutorials/how-to-configure-ocsp-stapling-on-apache-and-nginx
  # ssl_stapling on;
  # ssl_stapling_verify on;
  # ssl_trusted_certificate /etc/nginx/ssl/stapling.trusted.crt;
  # resolver 208.67.222.222 208.67.222.220 valid=300s; # Can change to your DNS resolver if desired
  # resolver_timeout 10s;

  ssl_dhparam /etc/ssl/certs/dhparam.pem;

  location / {
    proxy_pass http://$router_host:8092;
  }

  include includes/letsencrypt.conf;
}
@@ -0,0 +1,19 @@

proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header Host $the_host;
proxy_set_header X-Forwarded-Host $the_host;
proxy_set_header X-Forwarded-Proto $the_scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

server {
  listen 0.0.0.0:80;
  listen [::]:80 default_server;

  client_max_body_size 4G;

  location / {
    proxy_pass http://$router_host:8092;
  }

  include includes/letsencrypt.conf;
}
@@ -0,0 +1,33 @@

user nginx;
worker_processes 1;

error_log /var/log/nginx/error.log warn;
pid /tmp/nginx.pid;


events {
  worker_connections 1024;
}


http {
  client_body_temp_path /tmp/client_temp;
  proxy_temp_path /tmp/proxy_temp_path;
  fastcgi_temp_path /tmp/fastcgi_temp;
  uwsgi_temp_path /tmp/uwsgi_temp;
  scgi_temp_path /tmp/scgi_temp;
  include /etc/nginx/mime.types;
  default_type application/octet-stream;

  log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                  '$status $body_bytes_sent "$http_referer" '
                  '"$http_user_agent" "$http_x_forwarded_for"';

  access_log /var/log/nginx/access.log main;

  sendfile on;

  keepalive_timeout 65;

  include /etc/nginx/conf.d/*.conf;
}
@@ -0,0 +1,32 @@

resolver 127.0.0.11 valid=30s;

map $http_host $this_host {
  "" $host;
  default $http_host;
}

map $http_x_forwarded_proto $the_scheme {
  default $http_x_forwarded_proto;
  "" $scheme;
}

map $http_x_forwarded_host $the_host {
  default $http_x_forwarded_host;
  "" $host;
}

map $http_x_forwarded_port $proxy_x_forwarded_port {
  default $http_x_forwarded_port;
  '' $server_port;
}

map $http_upgrade $proxy_connection {
  default upgrade;
  "" close;
}

map $ROUTER_HOST $router_host {
  volatile;
  default onlyoffice-router;
  ~^(.*)$ $1;
}
@@ -0,0 +1,79 @@

resolver $DNS_NAMESERVER valid=30s;

map $SERVICE_LOGIN $service_login {
  volatile;
  "" 127.0.0.1:5011;
  default $SERVICE_LOGIN;
}

map $SERVICE_DOCEDITOR $service_doceditor {
  volatile;
  "" 127.0.0.1:5013;
  default $SERVICE_DOCEDITOR;
}

map $SERVICE_API_SYSTEM $service_api_system {
  volatile;
  "" 127.0.0.1:5010;
  default $SERVICE_API_SYSTEM;
}

map $SERVICE_BACKUP $service_backup {
  volatile;
  "" 127.0.0.1:5012;
  default $SERVICE_BACKUP;
}

map $SERVICE_FILES $service_files {
  volatile;
  "" 127.0.0.1:5007;
  default $SERVICE_FILES;
}

map $SERVICE_PEOPLE_SERVER $service_people_server {
  volatile;
  "" 127.0.0.1:5004;
  default $SERVICE_PEOPLE_SERVER;
}

map $SERVICE_SOCKET $service_socket {
  volatile;
  "" 127.0.0.1:9899;
  default $SERVICE_SOCKET;
}

map $SERVICE_API $service_api {
  volatile;
  "" 127.0.0.1:5000;
  default $SERVICE_API;
}

map $SERVICE_STUDIO $service_studio {
  volatile;
  "" 127.0.0.1:5003;
  default $SERVICE_STUDIO;
}

map $SERVICE_SSOAUTH $service_sso {
  volatile;
  "" 127.0.0.1:9834;
  default $SERVICE_SSOAUTH;
}

map $SERVICE_HELTHCHECKS $service_healthchecks {
  volatile;
  "" 127.0.0.1:5033;
  default $SERVICE_HELTHCHECKS;
}

map "$DOCUMENT_SERVER_URL_EXTERNAL" "$document_server" {
  volatile;
  default "$DOCUMENT_SERVER_URL_EXTERNAL";
  "" "http://$DOCUMENT_CONTAINER_NAME";
}

map $SERVICE_CLIENT $service_client {
  volatile;
  "" 127.0.0.1:5001;
  default $SERVICE_CLIENT;
}
@@ -0,0 +1,383 @@

version: "3.8"

#######
x-healthcheck:
  &x-healthcheck
  test: curl --fail http://127.0.0.1 || exit 1
  interval: 60s
  retries: 5
  start_period: 20s
  timeout: 10s

x-service:
  &x-service-base
  container_name: base
  restart: always
  expose:
    - ${SERVICE_PORT}
  environment:
    MYSQL_CONTAINER_NAME: ${MYSQL_CONTAINER_NAME}
    MYSQL_HOST: ${MYSQL_HOST}
    MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
    MYSQL_DATABASE: ${MYSQL_DATABASE}
    MYSQL_USER: ${MYSQL_USER}
    MYSQL_PASSWORD: ${MYSQL_PASSWORD}
    DATABASE_MIGRATION: ${DATABASE_MIGRATION}
    APP_DOTNET_ENV: ${APP_DOTNET_ENV}
    APP_KNOWN_NETWORKS: ${APP_KNOWN_NETWORKS}
    APP_KNOWN_PROXIES: ${APP_KNOWN_PROXIES}
    APP_CORE_BASE_DOMAIN: ${APP_CORE_BASE_DOMAIN}
    APP_CORE_MACHINEKEY: ${APP_CORE_MACHINEKEY}
    APP_URL_PORTAL: ${APP_URL_PORTAL}
    INSTALLATION_TYPE: ${INSTALLATION_TYPE}
    OAUTH_REDIRECT_URL: ${OAUTH_REDIRECT_URL}
    DOCUMENT_SERVER_JWT_SECRET: ${DOCUMENT_SERVER_JWT_SECRET}
    DOCUMENT_SERVER_JWT_HEADER: ${DOCUMENT_SERVER_JWT_HEADER}
    DOCUMENT_SERVER_URL_PUBLIC: ${DOCUMENT_SERVER_URL_PUBLIC}
    DOCUMENT_CONTAINER_NAME: ${DOCUMENT_CONTAINER_NAME}
    DOCUMENT_SERVER_URL_EXTERNAL: ${DOCUMENT_SERVER_URL_EXTERNAL}
    # KAFKA_HOST: ${KAFKA_HOST}
    ELK_CONTAINER_NAME: ${ELK_CONTAINER_NAME}
    ELK_SHEME: ${ELK_SHEME}
    ELK_HOST: ${ELK_HOST}
    ELK_PORT: ${ELK_PORT}
    REDIS_CONTAINER_NAME: ${REDIS_CONTAINER_NAME}
    REDIS_HOST: ${REDIS_HOST}
    REDIS_PORT: ${REDIS_PORT}
    REDIS_USER_NAME: ${REDIS_USER_NAME}
    REDIS_PASSWORD: ${REDIS_PASSWORD}
    RABBIT_CONTAINER_NAME: ${RABBIT_CONTAINER_NAME}
    RABBIT_HOST: ${RABBIT_HOST}
    RABBIT_PORT: ${RABBIT_PORT}
    RABBIT_VIRTUAL_HOST: ${RABBIT_VIRTUAL_HOST}
    RABBIT_USER_NAME: ${RABBIT_USER_NAME}
    RABBIT_PASSWORD: ${RABBIT_PASSWORD}
    ROUTER_HOST: ${ROUTER_HOST}
    LOG_LEVEL: ${LOG_LEVEL}
    DEBUG_INFO: ${DEBUG_INFO}
  volumes:
    - ./data/app_data:/app/onlyoffice/data # changed
    - files_data:/var/www/products/ASC.Files/server/
    - people_data:/var/www/products/ASC.People/server/
  # added
  depends_on:
    onlyoffice-migration-runner:
      condition: service_completed_successfully
    onlyoffice-mysql-server:
      condition: service_healthy
#######

#######
services:
  onlyoffice-mysql-server:
    image: ${MYSQL_IMAGE}
    command: --default-authentication-plugin=caching_sha2_password
    cap_add:
      - SYS_NICE
    container_name: ${MYSQL_CONTAINER_NAME}
    restart: always
    # tty: true
    user: mysql
    expose:
      - "3306"
    ports:
      - 33060:3306
    environment:
      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
      MYSQL_DATABASE: ${MYSQL_DATABASE}
      MYSQL_USER: ${MYSQL_USER}
      MYSQL_PASSWORD: ${MYSQL_PASSWORD}
    volumes:
      - ./data/mysql_data:/var/lib/mysql # changed
      - ./config/mysql/conf.d/:/etc/mysql/conf.d
    # added
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
      timeout: 20s
      retries: 10

  onlyoffice-migration-runner:
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-migration-runner:${DOCKER_TAG}"
    container_name: ${MIGRATION_RUNNER_HOST}
    restart: "no"
    environment:
      MYSQL_CONTAINER_NAME: ${MYSQL_CONTAINER_NAME}
      MYSQL_HOST: ${MYSQL_HOST}
      MYSQL_DATABASE: ${MYSQL_DATABASE}
      MYSQL_USER: ${MYSQL_USER}
      MYSQL_PASSWORD: ${MYSQL_PASSWORD}
    # added
    depends_on:
      onlyoffice-mysql-server:
        condition: service_healthy
  #######


  #######
  onlyoffice-rabbitmq:
    image: rabbitmq:3
    container_name: ${RABBIT_CONTAINER_NAME}
    restart: always
    expose:
      - "5672"
      - "80"

  onlyoffice-redis:
    image: redis:7
    container_name: ${REDIS_CONTAINER_NAME}
    restart: always
    expose:
      - "6379"

  onlyoffice-elasticsearch:
    image: onlyoffice/elasticsearch:${ELK_VERSION}
    container_name: ${ELK_CONTAINER_NAME}
    restart: always
    environment:
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g -Dlog4j2.formatMsgNoLookups=true" # changed Xms4g > Xms1g
      - "indices.fielddata.cache.size=30%"
      - "indices.memory.index_buffer_size=30%"
      - "ingest.geoip.downloader.enabled=false"
    ulimits:
      # memlock: # changed for LXC
      #   soft: -1 # changed for LXC
      #   hard: -1 # changed for LXC
      nofile:
        soft: 65535
        hard: 65535
    volumes:
      - ./data/es_data:/usr/share/elasticsearch/data # changed
    expose:
      - "9200"
      - "9300"
  #######

  #######
  onlyoffice-backup-background-tasks:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-backup-background:${DOCKER_TAG}"
    container_name: ${BACKUP_BACKGRUOND_TASKS_HOST}
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_BACKUP_BACKGRUOND_TASKS}/health/ || exit 1

  onlyoffice-backup:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-backup:${DOCKER_TAG}"
    container_name: ${BACKUP_HOST}
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_BACKUP}/health/ || exit 1

  onlyoffice-clear-events:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-clear-events:${DOCKER_TAG}"
    container_name: ${CLEAR_EVENTS_HOST}
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_CLEAR_EVENTS}/health/ || exit 1

  onlyoffice-files:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-files:${DOCKER_TAG}"
    container_name: ${FILES_HOST}
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_FILES}/health/ || exit 1

  onlyoffice-files-services:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-files-services:${DOCKER_TAG}"
    container_name: ${FILES_SERVICES_HOST}
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_FILES_SERVICES}/health/ || exit 1

  onlyoffice-people-server:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-people-server:${DOCKER_TAG}"
    container_name: ${PEOPLE_SERVER_HOST}
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_PEOPLE_SERVER}/health/ || exit 1

  onlyoffice-socket:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-socket:${DOCKER_TAG}"
    container_name: ${SOCKET_HOST}
    expose:
      - ${SERVICE_PORT}

  onlyoffice-studio-notify:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-studio-notify:${DOCKER_TAG}"
    container_name: ${STUDIO_NOTIFY_HOST}
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_STUDIO_NOTIFY}/health/ || exit 1

  onlyoffice-api:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-api:${DOCKER_TAG}"
    container_name: ${API_HOST}
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_API}/health/ || exit 1

  onlyoffice-api-system:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-api-system:${DOCKER_TAG}"
    container_name: ${API_SYSTEM_HOST}
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_API_SYSTEM}/health/ || exit 1

  onlyoffice-studio:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-studio:${DOCKER_TAG}"
    container_name: ${STUDIO_HOST}
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_STUDIO}/health/ || exit 1

  onlyoffice-ssoauth:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-ssoauth:${DOCKER_TAG}"
    container_name: ${SSOAUTH_HOST}
    expose:
      - ${SERVICE_PORT}
      - "9834"

  onlyoffice-doceditor:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-doceditor:${DOCKER_TAG}"
    container_name: ${DOCEDITOR_HOST}
    expose:
      - "5013"
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_DOCEDITOR}/health || exit 1

  onlyoffice-login:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-login:${DOCKER_TAG}"
    container_name: ${LOGIN_HOST}
    expose:
      - "5011"
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_LOGIN}/health || exit 1
  #######

  #######
  onlyoffice-router:
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-router:${DOCKER_TAG}"
    container_name: ${ROUTER_HOST}
    restart: always
    healthcheck:
      <<: *x-healthcheck
      test: nginx -t || exit 1
    expose:
      - "8081"
      - "8099"
      - "8092"
    depends_on:
      - onlyoffice-backup-background-tasks
      - onlyoffice-backup
      - onlyoffice-clear-events
      - onlyoffice-files
      - onlyoffice-files-services
      - onlyoffice-people-server
      - onlyoffice-socket
      - onlyoffice-studio-notify
      - onlyoffice-api
      - onlyoffice-api-system
      - onlyoffice-studio
      - onlyoffice-ssoauth
      - onlyoffice-doceditor
      - onlyoffice-login
    environment:
      - SERVICE_BACKUP=${SERVICE_BACKUP}
      - SERVICE_FILES=${SERVICE_FILES}
      - SERVICE_FILES_SERVICES=${SERVICE_FILES_SERVICES}
      - SERVICE_CLEAR_EVENTS=${SERVICE_CLEAR_EVENTS}
      - SERVICE_NOTIFY=${SERVICE_NOTIFY}
      - SERVICE_PEOPLE_SERVER=${SERVICE_PEOPLE_SERVER}
      - SERVICE_SOCKET=${SERVICE_SOCKET}
      - SERVICE_STUDIO_NOTIFY=${SERVICE_STUDIO_NOTIFY}
      - SERVICE_API=${SERVICE_API}
      - SERVICE_API_SYSTEM=${SERVICE_API_SYSTEM}
      - SERVICE_STUDIO=${SERVICE_STUDIO}
      - SERVICE_SSOAUTH=${SERVICE_SSOAUTH}
      - SERVICE_DOCEDITOR=${SERVICE_DOCEDITOR}
      - SERVICE_LOGIN=${SERVICE_LOGIN}
      - SERVICE_HELTHCHECKS=${SERVICE_HELTHCHECKS}
      - WRONG_PORTAL_NAME_URL=${WRONG_PORTAL_NAME_URL}
      - DOCUMENT_CONTAINER_NAME=${DOCUMENT_CONTAINER_NAME}
      - DOCUMENT_SERVER_URL_EXTERNAL=${DOCUMENT_SERVER_URL_EXTERNAL}
      - REDIS_CONTAINER_NAME=${REDIS_CONTAINER_NAME}
      - REDIS_HOST=${REDIS_HOST}
      - REDIS_PORT=${REDIS_PORT}
      - REDIS_PASSWORD=${REDIS_PASSWORD}
      - SERVICE_PORT=${SERVICE_PORT}
    volumes:
      - ./data/router_log:/var/log/nginx

  onlyoffice-proxy:
    image: nginx
    container_name: ${PROXY_HOST}
    restart: always
    healthcheck:
      <<: *x-healthcheck
      test: nginx -t || exit 1
    ports:
      - ${EXTERNAL_PORT}:80
      # - 443:443 # for selfsigned ssl
    environment:
      - ROUTER_HOST=${ROUTER_HOST}
    volumes:
      - ./data/webroot_path:/letsencrypt # changed
      - ./data/proxy_log:/var/log/nginx # changed
      - ./config/nginx/templates/nginx.conf.template:/etc/nginx/nginx.conf
      - ./config/nginx/letsencrypt.conf:/etc/nginx/includes/letsencrypt.conf
      - ./config/nginx/templates/proxy.upstream.conf.template:/etc/nginx/templates/proxy.upstream.conf.template:ro
      - ./config/nginx/onlyoffice-proxy.conf:/etc/nginx/conf.d/default.conf
      # - ${CERTIFICATE_PATH}:/usr/local/share/ca-certificates/tls.crt # for selfsigned ssl
      # - ${CERTIFICATE_KEY_PATH}:/etc/ssl/private/tls.key # for selfsigned ssl
      # - ${DHPARAM_PATH}:/etc/ssl/certs/dhparam.pem # for selfsigned ssl

  #######

  #######
  onlyoffice-notify:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-notify:${DOCKER_TAG}"
    container_name: ${NOTIFY_HOST}
    healthcheck:
      <<: *x-healthcheck
      test: curl --fail http://${SERVICE_NOTIFY}/health/ || exit 1

  onlyoffice-health-checks-ui:
    <<: *x-service-base
    image: "${REPO}/${DOCKER_IMAGE_PREFIX}-healthchecks:${DOCKER_TAG}"
    container_name: ${HELTHCHECKS_HOST}
  #######

networks:
  onlyoffice:

volumes:
  files_data:
  people_data:
  # mysql_data:
  # es_data:
  # router_log:
  # proxy_log:
  # webroot_path:
  # app_data:
  # crm_data:
  # project_data:
  # calendar_data:
  # mail_data:
Binary file not shown.
Binary file not shown.