main
Badanin Maksim 1 year ago
commit 6fae65c3d7

128
.env

@ -0,0 +1,128 @@
# This file is a template for docker compose deployment
# Copy this file to .env and change the values as needed
#----------ADDED------------
SECRET_PASSWORD=<CHANGE>
POSTGRES_PASSWORD=$SECRET_PASSWORD
#---------------------------
# AppFlowy Cloud
## URL that connects to the gotrue docker container
APPFLOWY_GOTRUE_BASE_URL=http://gotrue:9999
## URL that connects to the postgres docker container
APPFLOWY_DATABASE_URL=postgres://postgres:$POSTGRES_PASSWORD@postgres:5432/postgres #CHANGED
APPFLOWY_ACCESS_CONTROL=true
APPFLOWY_WEBSOCKET_MAILBOX_SIZE=6000
APPFLOWY_DATABASE_MAX_CONNECTIONS=40
# admin frontend
## URL that connects to redis docker container
ADMIN_FRONTEND_REDIS_URL=redis://redis:6379
## URL that connects to gotrue docker container
ADMIN_FRONTEND_GOTRUE_URL=http://gotrue:9999
# authentication key, change this and keep the key safe and secret
# self defined key, you can use any string
GOTRUE_JWT_SECRET=$SECRET_PASSWORD #CHANGED
# Expiration time in seconds for the JWT token
GOTRUE_JWT_EXP=7200
# User sign up will automatically be confirmed if this is set to true.
# If you have OAuth2 set up or smtp configured, you can set this to false
# to enforce email confirmation or OAuth2 login instead.
# If you set this to false, you need to either set up SMTP
GOTRUE_MAILER_AUTOCONFIRM=false
# Number of emails that can be sent per minute
GOTRUE_RATE_LIMIT_EMAIL_SENT=100
# If you intend to use mail confirmation, you need to set the SMTP configuration below
# You would then need to set GOTRUE_MAILER_AUTOCONFIRM=false
# Check for logs in gotrue service if there are any issues with email confirmation
GOTRUE_SMTP_HOST=<CHANGE>
GOTRUE_SMTP_PORT=<CHANGE>
GOTRUE_SMTP_USER=<CHANGE>
GOTRUE_SMTP_PASS=<CHANGE>
GOTRUE_SMTP_ADMIN_EMAIL=<CHANGE>
# This user will be created when AppFlowy Cloud starts successfully
# You can use this user to login to the admin panel
GOTRUE_ADMIN_EMAIL=<CHANGE>
GOTRUE_ADMIN_PASSWORD=$SECRET_PASSWORD #CHANGED
# User will be redirected to this after Email or OAuth login
# Change this to your own domain where you host the docker-compose or gotrue
# If you are using a different domain, you need to change the redirect_uri in the OAuth2 configuration
# Make sure that this domain is accessible to the user
API_EXTERNAL_URL=<CHANGE>
# In docker environment, `postgres` is the hostname of the postgres service
# GoTrue connect to postgres using this url
GOTRUE_DATABASE_URL=postgres://supabase_auth_admin:root@postgres:5432/postgres
# Refer to this for details: https://github.com/AppFlowy-IO/AppFlowy-Cloud/blob/main/doc/AUTHENTICATION.md
# Google OAuth2
GOTRUE_EXTERNAL_GOOGLE_ENABLED=false
GOTRUE_EXTERNAL_GOOGLE_CLIENT_ID=
GOTRUE_EXTERNAL_GOOGLE_SECRET=
GOTRUE_EXTERNAL_GOOGLE_REDIRECT_URI=http://your-host/gotrue/callback
# GitHub OAuth2
GOTRUE_EXTERNAL_GITHUB_ENABLED=false
GOTRUE_EXTERNAL_GITHUB_CLIENT_ID=
GOTRUE_EXTERNAL_GITHUB_SECRET=
GOTRUE_EXTERNAL_GITHUB_REDIRECT_URI=http://your-host/gotrue/callback
# Discord OAuth2
GOTRUE_EXTERNAL_DISCORD_ENABLED=false
GOTRUE_EXTERNAL_DISCORD_CLIENT_ID=
GOTRUE_EXTERNAL_DISCORD_SECRET=
GOTRUE_EXTERNAL_DISCORD_REDIRECT_URI=http://your-host/gotrue/callback
# File Storage
# This is where storage like images, files, etc. will be stored
# By default, Minio is used as the default file storage which uses host's file system
APPFLOWY_S3_USE_MINIO=true
APPFLOWY_S3_MINIO_URL=http://minio:9000 # change this if you are using a different address for minio
APPFLOWY_S3_ACCESS_KEY=minioadmin
APPFLOWY_S3_SECRET_KEY=$SECRET_PASSWORD #CHANGED
APPFLOWY_S3_BUCKET=appflowy
#APPFLOWY_S3_REGION=us-east-1
# AppFlowy Cloud Mailer
APPFLOWY_MAILER_SMTP_HOST=$GOTRUE_SMTP_HOST #CHANGED
APPFLOWY_MAILER_SMTP_PORT=$GOTRUE_SMTP_PORT #CHANGED
APPFLOWY_MAILER_SMTP_USERNAME=$GOTRUE_SMTP_USER #CHANGED
APPFLOWY_MAILER_SMTP_PASSWORD=$GOTRUE_SMTP_PASS #CHANGED
# Log level for the appflowy-cloud service
RUST_LOG=info
# PgAdmin
# Optional module to manage the postgres database
# You can access the pgadmin at http://your-host/pgadmin
# Refer to the APPFLOWY_DATABASE_URL for password when connecting to the database
PGADMIN_DEFAULT_EMAIL=$GOTRUE_ADMIN_EMAIL #CHANGED
PGADMIN_DEFAULT_PASSWORD=$SECRET_PASSWORD #CHANGED
# Portainer (username: admin)
PORTAINER_PASSWORD=$SECRET_PASSWORD #CHANGED
# Cloudflare tunnel token
CLOUDFLARE_TUNNEL_TOKEN=
# If you are using a different postgres database, change the following values
# GOTRUE_DATABASE_URL=postgres://supabase_auth_admin:root@<host>:<port>/$POSTGRES_DB
# APPFLOWY_DATABASE_URL=postgres://POSTGRES_USER:$POSTGRES_PASSWORD@$POSTGRES_HOST:5432/$POSTGRES_DB
# AppFlowy AI
APPFLOWY_AI_OPENAI_API_KEY=
APPFLOWY_AI_SERVER_PORT=5001
APPFLOWY_AI_SERVER_HOST=ai
APPFLOWY_AI_DATABASE_URL=postgresql+psycopg://postgres:$POSTGRES_PASSWORD@postgres:5432/postgres #CHANGED
# AppFlowy History
APPFLOWY_HISTORY_URL=http://localhost:50051
APPFLOWY_HISTORY_REDIS_URL=redis://redis:6379
APPFLOWY_HISTORY_DATABASE_URL=postgres://postgres:$POSTGRES_PASSWORD@postgres:5432/postgres #CHANGED
# AppFlowy Indexer
APPFLOWY_INDEXER_DATABASE_URL=postgres://postgres:$POSTGRES_PASSWORD@postgres:5432/postgres #CHANGED
APPFLOWY_INDEXER_REDIS_URL=redis://redis:6379

@ -0,0 +1,35 @@
# Установка AppFlowy-Cloud
Основан на [https://github.com/AppFlowy-IO/AppFlowy-Cloud](https://github.com/AppFlowy-IO/AppFlowy-Cloud)
#### Параметры переменных в файле .env
```
SECRET_PASSWORD=<CHANGE> # Админские пароли и JWT
GOTRUE_MAILER_AUTOCONFIRM=false # Отключить автоподтверждение почты
GOTRUE_SMTP_HOST и APPFLOWY_MAILER_SMTP_HOST # имя почтового сервера. Например: mail.example.com
GOTRUE_SMTP_PORT и APPFLOWY_MAILER_SMTP_PORT # порт почтового сервера. Например: 25
GOTRUE_SMTP_USER и APPFLOWY_MAILER_SMTP_USERNAME # учетная запись
GOTRUE_SMTP_PASS и APPFLOWY_MAILER_SMTP_PASSWORD # пароль
GOTRUE_SMTP_ADMIN_EMAIL # адрес рассылки. Например: admin@example.com
GOTRUE_ADMIN_EMAIL и PGADMIN_DEFAULT_EMAIL # Почта входа администратора. Например: admin@example.com
API_EXTERNAL_URL # Домен сайта. Например: https://af.example.com
```
#### Запуск
```
git clone https://git.badms.ru/bms/appflowy.git
cd appflowy
# Предварительно отредактировать переменные
docker compose up -d
```

@ -0,0 +1,46 @@
-- Add migration script here
-- Required by uuid_generate_v4()
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- user table
CREATE TABLE IF NOT EXISTS af_user (
uid BIGINT PRIMARY KEY,
uuid UUID NOT NULL , -- related to gotrue
email TEXT NOT NULL DEFAULT '' UNIQUE, -- not needed when authenticated with gotrue
password TEXT NOT NULL DEFAULT '', -- not needed when authenticated with gotrue
name TEXT NOT NULL DEFAULT '',
metadata JSONB DEFAULT '{}'::JSONB, -- used to user's metadata such as avatar, OpenAI key, etc.
encryption_sign TEXT DEFAULT NULL, -- used to encrypt the user's data
deleted_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
CREATE OR REPLACE FUNCTION update_updated_at_column_func() RETURNS TRIGGER AS $$ BEGIN NEW.updated_at = NOW();
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_af_user_modtime BEFORE
UPDATE ON af_user FOR EACH ROW EXECUTE PROCEDURE update_updated_at_column_func();
CREATE OR REPLACE FUNCTION prevent_reset_encryption_sign_func() RETURNS TRIGGER AS $$ BEGIN IF OLD.encryption_sign IS NOT NULL
AND NEW.encryption_sign IS DISTINCT
FROM OLD.encryption_sign THEN RAISE EXCEPTION 'The encryption sign can not be reset once it has been set';
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trigger_prevent_reset_encryption_sign BEFORE
UPDATE ON af_user FOR EACH ROW EXECUTE FUNCTION prevent_reset_encryption_sign_func();
-- Enable RLS on the af_user table
-- Policy for INSERT
ALTER TABLE af_user ENABLE ROW LEVEL SECURITY;
CREATE POLICY af_user_insert_policy ON public.af_user FOR
INSERT TO anon,
authenticated WITH CHECK (true);
-- Policy for UPDATE
CREATE POLICY af_user_update_policy ON public.af_user FOR
UPDATE USING (true) WITH CHECK (true);
-- Policy for SELECT
CREATE POLICY af_user_select_policy ON public.af_user FOR
SELECT TO anon,
authenticated USING (true);
ALTER TABLE af_user FORCE ROW LEVEL SECURITY;

@ -0,0 +1,70 @@
-- Create the af_roles table
CREATE TABLE IF NOT EXISTS af_roles (
id SERIAL PRIMARY KEY,
name TEXT UNIQUE NOT NULL
);
-- Insert default roles
INSERT INTO af_roles (name)
VALUES ('Owner'),
('Member'),
('Guest');
CREATE TABLE af_permissions (
id SERIAL PRIMARY KEY,
name VARCHAR(255) UNIQUE NOT NULL,
access_level INTEGER NOT NULL,
description TEXT
);
-- Insert default permissions
INSERT INTO af_permissions (name, description, access_level)
VALUES ('Read only', 'Can read', 10),
(
'Read and comment',
'Can read and comment, but not edit',
20
),
(
'Read and write',
'Can read and edit, but not share with others',
30
),
(
'Full access',
'Can edit and share with others',
50
);
-- Represents a permission that a role has. The list of all permissions a role has can be obtained by querying this table for all rows with a given role_id.
CREATE TABLE af_role_permissions (
role_id INT REFERENCES af_roles(id),
permission_id INT REFERENCES af_permissions(id),
PRIMARY KEY (role_id, permission_id)
);
-- Associate permissions with roles
WITH role_ids AS (
SELECT id,
name
FROM af_roles
WHERE name IN ('Owner', 'Member', 'Guest')
),
permission_ids AS (
SELECT id,
name
FROM af_permissions
WHERE name IN ('Full access', 'Read and write', 'Read only')
)
INSERT INTO af_role_permissions (role_id, permission_id)
SELECT r.id,
p.id
FROM role_ids r
CROSS JOIN permission_ids p
WHERE (
r.name = 'Owner'
AND p.name = 'Full access'
)
OR (
r.name = 'Member'
AND p.name = 'Read and write'
)
OR (
r.name = 'Guest'
AND p.name = 'Read only'
);

@ -0,0 +1,81 @@
-- af_workspace contains all the workspaces. Each workspace contains a list of members defined in af_workspace_member
CREATE TABLE IF NOT EXISTS af_workspace (
workspace_id UUID NOT NULL PRIMARY KEY DEFAULT uuid_generate_v4(),
database_storage_id UUID NOT NULL DEFAULT uuid_generate_v4(),
owner_uid BIGINT NOT NULL REFERENCES af_user(uid) ON DELETE CASCADE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- 0: Free
workspace_type INTEGER NOT NULL DEFAULT 0,
deleted_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
workspace_name TEXT DEFAULT 'My Workspace'
);
-- Enable RLS on the af_workspace table
ALTER TABLE af_workspace ENABLE ROW LEVEL SECURITY;
CREATE POLICY af_workspace_policy ON af_workspace FOR ALL TO anon,
authenticated USING (true);
ALTER TABLE af_workspace FORCE ROW LEVEL SECURITY;
-- af_workspace_member contains all the members associated with a workspace and their roles.
CREATE TABLE IF NOT EXISTS af_workspace_member (
uid BIGINT NOT NULL,
role_id INT NOT NULL REFERENCES af_roles(id),
workspace_id UUID NOT NULL REFERENCES af_workspace(workspace_id) ON DELETE CASCADE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (uid, workspace_id)
);
-- Enable RLS on the af_workspace_member table
ALTER TABLE af_workspace_member ENABLE ROW LEVEL SECURITY;
CREATE POLICY af_workspace_member_policy ON af_workspace_member FOR ALL TO anon,
authenticated USING (true);
ALTER TABLE af_workspace_member FORCE ROW LEVEL SECURITY;
-- Listener for af_workspace_member table
DROP TRIGGER IF EXISTS af_workspace_member_change_trigger ON af_workspace_member;
CREATE OR REPLACE FUNCTION notify_af_workspace_member_change() RETURNS trigger AS $$
DECLARE
payload TEXT;
BEGIN
payload := json_build_object(
'old', row_to_json(OLD),
'new', row_to_json(NEW),
'action_type', TG_OP
)::text;
PERFORM pg_notify('af_workspace_member_channel', payload);
-- Return the new row state for INSERT/UPDATE, and the old state for DELETE.
IF TG_OP = 'DELETE' THEN
RETURN OLD;
ELSE
RETURN NEW;
END IF;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER af_workspace_member_change_trigger
AFTER INSERT OR UPDATE OR DELETE ON af_workspace_member
FOR EACH ROW EXECUTE FUNCTION notify_af_workspace_member_change();
-- Index
CREATE UNIQUE INDEX idx_af_workspace_member ON af_workspace_member (uid, workspace_id, role_id);
-- Insert a workspace member if the user with given uid is the owner of the workspace
CREATE OR REPLACE FUNCTION insert_af_workspace_member_if_owner(
p_uid BIGINT,
p_role_id INT,
p_workspace_id UUID
) RETURNS VOID AS $$ BEGIN -- If user is the owner, proceed with the insert operation
INSERT INTO af_workspace_member (uid, role_id, workspace_id)
SELECT p_uid,
p_role_id,
p_workspace_id
FROM af_workspace
WHERE workspace_id = p_workspace_id
AND owner_uid = p_uid;
-- Check if the insert operation was successful. If not, user is not the owner of the workspace.
IF NOT FOUND THEN RAISE EXCEPTION 'Unsupported operation: User is not the owner of the workspace.';
END IF;
END;
$$ LANGUAGE plpgsql;

@ -0,0 +1,18 @@
-- af_user_profile_view contains all the user profiles and their latest workspace_id.
-- A subquery ranks each user's workspace memberships by updated_at (newest first);
-- the rank-1 row supplies latest_workspace_id. A LEFT JOIN is used so that users
-- without any workspace still appear in the view, with latest_workspace_id NULL.
-- (The original used INNER JOIN, silently dropping workspace-less users despite
-- the stated intent above.)
CREATE OR REPLACE VIEW af_user_profile_view AS
SELECT u.*,
  w.workspace_id AS latest_workspace_id
FROM af_user u
  LEFT JOIN (
    SELECT uid,
      workspace_id,
      rank() OVER (
        PARTITION BY uid
        ORDER BY updated_at DESC
      ) AS rn
    FROM af_workspace_member
  ) w ON u.uid = w.uid
  AND w.rn = 1;

@ -0,0 +1,77 @@
-- collab update table.
CREATE TABLE IF NOT EXISTS af_collab (
oid TEXT NOT NULL,
blob BYTEA NOT NULL,
len INTEGER,
partition_key INTEGER NOT NULL,
encrypt INTEGER DEFAULT 0,
owner_uid BIGINT NOT NULL,
deleted_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
workspace_id UUID NOT NULL REFERENCES af_workspace(workspace_id) ON DELETE CASCADE,
PRIMARY KEY (oid, partition_key)
) PARTITION BY LIST (partition_key);
CREATE TABLE af_collab_document PARTITION OF af_collab FOR
VALUES IN (0);
CREATE TABLE af_collab_database PARTITION OF af_collab FOR
VALUES IN (1);
CREATE TABLE af_collab_w_database PARTITION OF af_collab FOR
VALUES IN (2);
CREATE TABLE af_collab_folder PARTITION OF af_collab FOR
VALUES IN (3);
CREATE TABLE af_collab_database_row PARTITION OF af_collab FOR
VALUES IN (4);
CREATE TABLE af_collab_user_awareness PARTITION OF af_collab FOR
VALUES IN (5);
CREATE TABLE af_collab_member (
uid BIGINT REFERENCES af_user(uid) ON DELETE CASCADE,
oid TEXT NOT NULL,
permission_id INTEGER REFERENCES af_permissions(id) NOT NULL,
PRIMARY KEY(uid, oid)
);
-- Listener
DROP TRIGGER IF EXISTS af_collab_member_change_trigger ON af_collab_member;
CREATE OR REPLACE FUNCTION notify_af_collab_member_change() RETURNS trigger AS $$
DECLARE
payload TEXT;
BEGIN
payload := json_build_object(
'old', row_to_json(OLD),
'new', row_to_json(NEW),
'action_type', TG_OP
)::text;
PERFORM pg_notify('af_collab_member_channel', payload);
-- Return the new row state for INSERT/UPDATE, and the old state for DELETE.
IF TG_OP = 'DELETE' THEN
RETURN OLD;
ELSE
RETURN NEW;
END IF;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER af_collab_member_change_trigger
AFTER INSERT OR UPDATE OR DELETE ON af_collab_member
FOR EACH ROW EXECUTE FUNCTION notify_af_collab_member_change();
-- collab snapshot. It will be used to store the snapshots of the collab.
CREATE TABLE IF NOT EXISTS af_collab_snapshot (
sid BIGSERIAL PRIMARY KEY,-- snapshot id
oid TEXT NOT NULL,
blob BYTEA NOT NULL,
len INTEGER NOT NULL,
encrypt INTEGER DEFAULT 0,
deleted_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
workspace_id UUID NOT NULL REFERENCES af_workspace(workspace_id) ON DELETE CASCADE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_af_collab_snapshot_oid ON af_collab_snapshot(oid);
-- Enable RLS on the af_collab table
ALTER TABLE af_collab ENABLE ROW LEVEL SECURITY;
CREATE POLICY af_collab_policy ON af_collab FOR ALL TO anon,
authenticated USING (true);
ALTER TABLE af_collab FORCE ROW LEVEL SECURITY;

@ -0,0 +1,8 @@
CREATE TABLE IF NOT EXISTS af_blob_metadata (
workspace_id UUID REFERENCES af_workspace(workspace_id) ON DELETE CASCADE NOT NULL,
file_id VARCHAR NOT NULL,
file_type VARCHAR NOT NULL,
file_size BIGINT NOT NULL,
modified_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL,
UNIQUE (workspace_id, file_id)
);

@ -0,0 +1,25 @@
-- Add migration script here
-- Drop the existing trigger if it exists
DROP TRIGGER IF EXISTS af_user_change_trigger ON af_user;
-- Create or replace the function
CREATE OR REPLACE FUNCTION notify_af_user_change() RETURNS TRIGGER AS $$
DECLARE
payload TEXT;
BEGIN
payload := json_build_object(
'payload', row_to_json(NEW),
'action_type', TG_OP
)::text;
PERFORM pg_notify('af_user_channel', payload);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Create the trigger
CREATE TRIGGER af_user_change_trigger
AFTER UPDATE ON af_user
FOR EACH ROW
EXECUTE FUNCTION notify_af_user_change();

@ -0,0 +1,35 @@
GRANT SELECT, INSERT, UPDATE, DELETE ON public.af_user TO supabase_auth_admin;
-- Trigger function to delete a user from the public.af_user table
-- when a user is deleted from the auth.users table (matched on the uuid field).
CREATE OR REPLACE FUNCTION public.delete_user()
RETURNS TRIGGER AS $$
BEGIN
DELETE FROM public.af_user WHERE uuid = OLD.id;
RETURN OLD;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER delete_user_trigger
AFTER DELETE ON auth.users
FOR EACH ROW EXECUTE FUNCTION public.delete_user();
-- Trigger function to propagate the 'deleted_at' field from auth.users
-- to the public.af_user table (Soft Delete).
CREATE OR REPLACE FUNCTION public.update_af_user_deleted_at()
RETURNS TRIGGER AS $$
BEGIN
-- Only act when the 'deleted_at' field actually changed
IF OLD.deleted_at IS DISTINCT FROM NEW.deleted_at THEN
-- Mirror the new 'deleted_at' value into public.af_user
UPDATE public.af_user
SET deleted_at = NEW.deleted_at
WHERE uuid = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER update_af_user_deleted_at_trigger
AFTER UPDATE OF deleted_at ON auth.users
FOR EACH ROW EXECUTE FUNCTION public.update_af_user_deleted_at();

@ -0,0 +1,21 @@
-- Revert the last migration
REVOKE SELECT, INSERT, UPDATE, DELETE ON public.af_user FROM supabase_auth_admin;
DROP TRIGGER delete_user_trigger ON auth.users;
DROP TRIGGER update_af_user_deleted_at_trigger ON auth.users;
DROP FUNCTION public.delete_user();
DROP FUNCTION public.update_af_user_deleted_at();
-- Delete all users from public.af_user table that are not in auth.users table
-- (such rows would violate the foreign key constraint added below)
DELETE FROM public.af_user
WHERE NOT EXISTS (
SELECT 1
FROM auth.users
WHERE af_user.uuid = users.id
);
-- Add foreign key constraint to public.af_user table
-- NOTE(review): the constraint name mentions "email" but the key is on the uuid
-- column — presumably a leftover name; renaming would alter the schema other
-- migrations may reference, so it is kept as-is.
ALTER TABLE public.af_user
ADD CONSTRAINT af_user_email_foreign_key
FOREIGN KEY (uuid)
REFERENCES auth.users(id)
ON DELETE CASCADE;

@ -0,0 +1,20 @@
CREATE OR REPLACE FUNCTION af_workspace_insert_trigger()
RETURNS TRIGGER AS $$
BEGIN
-- Insert a record into af_workspace_member
INSERT INTO public.af_workspace_member (
uid, role_id,
workspace_id, created_at, updated_at)
VALUES (
NEW.owner_uid, (SELECT id FROM public.af_roles WHERE name = 'Owner'),
NEW.workspace_id, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);
-- Return the new record to complete the insert operation
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER af_workspace_after_insert
AFTER INSERT ON public.af_workspace
FOR EACH ROW
EXECUTE FUNCTION af_workspace_insert_trigger();

@ -0,0 +1 @@
ALTER TABLE af_workspace ADD COLUMN icon TEXT NOT NULL DEFAULT '';

@ -0,0 +1,3 @@
-- Add migration script here
ALTER TABLE af_collab_member
ADD COLUMN created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT (NOW());

@ -0,0 +1,63 @@
CREATE TABLE IF NOT EXISTS af_workspace_invitation (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
workspace_id UUID NOT NULL,
inviter BIGINT NOT NULL,
invitee BIGINT NOT NULL,
role_id INT NOT NULL,
status SMALLINT NOT NULL DEFAULT 0, -- 0: pending, 1: accepted, 2: rejected
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_af_workspace_invitation_inviter ON af_workspace_invitation (inviter);
CREATE INDEX idx_af_workspace_invitation_invitee ON af_workspace_invitation (invitee);
-- Auto update updated_at column upon status change
CREATE OR REPLACE FUNCTION update_af_workspace_invitation_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
IF OLD.status IS DISTINCT FROM NEW.status THEN
NEW.updated_at = CURRENT_TIMESTAMP;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER af_workspace_invitation_status_update
BEFORE UPDATE ON af_workspace_invitation
FOR EACH ROW
WHEN (OLD.status IS DISTINCT FROM NEW.status)
EXECUTE FUNCTION update_af_workspace_invitation_updated_at_column();
-- Auto add to af_workspace_member upon invitation accepted
CREATE OR REPLACE FUNCTION add_to_af_workspace_member()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.status = 1 THEN
-- workspace permission
INSERT INTO af_workspace_member (workspace_id, uid, role_id)
VALUES (NEW.workspace_id, NEW.invitee, NEW.role_id)
ON CONFLICT (workspace_id, uid) DO NOTHING;
-- collab permission
INSERT INTO af_collab_member (uid, oid, permission_id)
VALUES (
NEW.invitee,
NEW.workspace_id,
(SELECT permission_id
FROM public.af_role_permissions
WHERE public.af_role_permissions.role_id = NEW.role_id)
)
ON CONFLICT (uid, oid)
DO UPDATE
SET permission_id = excluded.permission_id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER af_workspace_invitation_accepted
BEFORE UPDATE ON af_workspace_invitation
FOR EACH ROW
WHEN (OLD.status IS DISTINCT FROM NEW.status AND NEW.status = 1)
EXECUTE FUNCTION add_to_af_workspace_member();

@ -0,0 +1,34 @@
ALTER TABLE af_workspace_invitation ADD COLUMN invitee_email TEXT NOT NULL;
-- Auto add to af_workspace_member upon invitation accepted
CREATE OR REPLACE FUNCTION add_to_af_workspace_member()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.status = 1 THEN
-- workspace permission
INSERT INTO af_workspace_member (workspace_id, uid, role_id)
VALUES (
NEW.workspace_id,
(SELECT uid FROM af_user WHERE email = NEW.invitee_email),
NEW.role_id
)
ON CONFLICT (workspace_id, uid) DO NOTHING;
-- collab permission
INSERT INTO af_collab_member (uid, oid, permission_id)
VALUES (
(SELECT uid FROM af_user WHERE email = NEW.invitee_email),
NEW.workspace_id,
(SELECT permission_id
FROM public.af_role_permissions
WHERE public.af_role_permissions.role_id = NEW.role_id)
)
ON CONFLICT (uid, oid)
DO UPDATE
SET permission_id = excluded.permission_id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
ALTER TABLE af_workspace_invitation DROP COLUMN invitee;

@ -0,0 +1,53 @@
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- create af_snapshot_meta table
CREATE TABLE IF NOT EXISTS af_snapshot_meta(
oid TEXT NOT NULL,
workspace_id UUID NOT NULL REFERENCES af_workspace(workspace_id) ON DELETE CASCADE,
snapshot BYTEA NOT NULL,
snapshot_version INTEGER NOT NULL,
partition_key INTEGER NOT NULL,
created_at BIGINT NOT NULL,
metadata JSONB,
PRIMARY KEY (oid, created_at, partition_key)
) PARTITION BY LIST (partition_key);
CREATE TABLE af_snapshot_meta_document PARTITION OF af_snapshot_meta FOR
VALUES IN (0);
CREATE TABLE af_snapshot_meta_database PARTITION OF af_snapshot_meta FOR
VALUES IN (1);
CREATE TABLE af_snapshot_meta_workspace_database PARTITION OF af_snapshot_meta FOR
VALUES IN (2);
CREATE TABLE af_snapshot_meta_folder PARTITION OF af_snapshot_meta FOR
VALUES IN (3);
CREATE TABLE af_snapshot_meta_database_row PARTITION OF af_snapshot_meta FOR
VALUES IN (4);
CREATE TABLE af_snapshot_meta_user_awareness PARTITION OF af_snapshot_meta FOR
VALUES IN (5);
-- create af_snapshot_state table
CREATE TABLE IF NOT EXISTS af_snapshot_state(
snapshot_id UUID NOT NULL DEFAULT uuid_generate_v4(),
workspace_id UUID NOT NULL REFERENCES af_workspace(workspace_id) ON DELETE CASCADE,
oid TEXT NOT NULL,
doc_state BYTEA NOT NULL,
doc_state_version INTEGER NOT NULL,
deps_snapshot_id UUID,
partition_key INTEGER NOT NULL,
created_at BIGINT NOT NULL,
PRIMARY KEY (snapshot_id, partition_key)
) PARTITION BY LIST (partition_key);
CREATE TABLE af_snapshot_state_document PARTITION OF af_snapshot_state FOR
VALUES IN (0);
CREATE TABLE af_snapshot_state_database PARTITION OF af_snapshot_state FOR
VALUES IN (1);
CREATE TABLE af_snapshot_state_workspace_database PARTITION OF af_snapshot_state FOR
VALUES IN (2);
CREATE TABLE af_snapshot_state_folder PARTITION OF af_snapshot_state FOR
VALUES IN (3);
CREATE TABLE af_snapshot_state_database_row PARTITION OF af_snapshot_state FOR
VALUES IN (4);
CREATE TABLE af_snapshot_state_user_awareness PARTITION OF af_snapshot_state FOR
VALUES IN (5);
-- Index for af_snapshot_state table
CREATE INDEX IF NOT EXISTS idx_snapshot_state_oid_created ON af_snapshot_state (oid, created_at DESC);

@ -0,0 +1,27 @@
-- Add migration script here
-- Create table for chat documents
CREATE TABLE af_chat
(
  chat_id UUID PRIMARY KEY,
  created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
  deleted_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
  name TEXT NOT NULL DEFAULT '',
  rag_ids JSONB NOT NULL DEFAULT '[]',
  workspace_id UUID NOT NULL,
  FOREIGN KEY (workspace_id) REFERENCES af_workspace (workspace_id) ON DELETE CASCADE
);
-- Create table for chat messages
CREATE TABLE af_chat_messages
(
  message_id BIGSERIAL PRIMARY KEY,
  author JSONB NOT NULL,
  chat_id UUID NOT NULL,
  content TEXT NOT NULL,
  deleted_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
  created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
  edited_at TIMESTAMP DEFAULT NULL,
  FOREIGN KEY (chat_id) REFERENCES af_chat (chat_id) ON DELETE CASCADE
);
-- Index messages by chat and creation time so per-chat history queries are cheap.
-- The original indexed (message_id, created_at); message_id is already the PRIMARY
-- KEY, making that index redundant, and the index name shows chat_id was intended.
CREATE INDEX idx_chat_messages_chat_id_created_at ON af_chat_messages (chat_id ASC, created_at ASC);

@ -0,0 +1,3 @@
-- Add migration script here
ALTER TABLE af_workspace ADD COLUMN search_token_usage BIGINT NOT NULL DEFAULT 0;
ALTER TABLE af_workspace ADD COLUMN index_token_usage BIGINT NOT NULL DEFAULT 0;

@ -0,0 +1,7 @@
-- Add migration script here
ALTER TABLE af_chat
ADD COLUMN meta_data JSONB DEFAULT '{}' NOT NULL;
ALTER TABLE af_chat_messages
ADD COLUMN meta_data JSONB DEFAULT '{}' NOT NULL,
ADD COLUMN reply_message_id BIGINT;

@ -0,0 +1,2 @@
-- Add migration script here
ALTER TABLE af_workspace ADD COLUMN settings JSONB;

@ -0,0 +1,30 @@
-- stores the published view of a workspace by a user of workspace
CREATE TABLE IF NOT EXISTS af_published_collab (
doc_name TEXT NOT NULL,
published_by BIGINT NOT NULL REFERENCES af_user(uid) ON DELETE CASCADE,
workspace_id UUID NOT NULL REFERENCES af_workspace(workspace_id) ON DELETE CASCADE,
metadata JSONB NOT NULL,
blob BYTEA NOT NULL DEFAULT '',
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (workspace_id, doc_name)
);
-- trigger to update updated_at column
CREATE OR REPLACE FUNCTION update_updated_at()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER af_published_collab_update_updated_at
BEFORE UPDATE ON af_published_collab
FOR EACH ROW
EXECUTE FUNCTION update_updated_at();
-- every workspace have a prefix for published view
ALTER TABLE af_workspace ADD COLUMN publish_namespace TEXT UNIQUE;
CREATE INDEX IF NOT EXISTS publish_namespace_idx ON af_workspace(publish_namespace);

@ -0,0 +1,24 @@
DO $$
BEGIN
-- Add migration script here
-- The pgvector extension may not be installed on the host; the whole block is
-- wrapped in an exception handler so the migration is skipped (not failed)
-- when the extension is unavailable.
CREATE EXTENSION IF NOT EXISTS vector;
-- create table to store collab embeddings
CREATE TABLE IF NOT EXISTS af_collab_embeddings
(
fragment_id TEXT NOT NULL PRIMARY KEY,
oid TEXT NOT NULL,
partition_key INTEGER NOT NULL,
content_type INTEGER NOT NULL,
indexed_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT (NOW()),
content TEXT,
embedding VECTOR(1536),
FOREIGN KEY (oid, partition_key) REFERENCES af_collab (oid, partition_key) ON DELETE CASCADE
);
-- HNSW index for cosine-similarity search over the embedding vectors
CREATE INDEX IF NOT EXISTS af_collab_embeddings_similarity_idx ON af_collab_embeddings USING hnsw (embedding vector_cosine_ops);
-- NOTE(review): WHEN OTHERS swallows ALL errors, not just a missing extension —
-- kept as-is since this best-effort behavior appears deliberate.
EXCEPTION WHEN OTHERS THEN
RAISE NOTICE 'could not create "vector" extension, ignoring this migration';
END;
$$ LANGUAGE plpgsql;

@ -0,0 +1,7 @@
CREATE INDEX IF NOT EXISTS idx_workspace_id_on_af_blob_metadata ON af_blob_metadata (workspace_id);
CREATE INDEX IF NOT EXISTS idx_workspace_id_on_af_chat ON af_chat (workspace_id);
CREATE INDEX IF NOT EXISTS idx_workspace_id_on_af_collab_snapshot ON af_collab_snapshot (workspace_id);
CREATE INDEX IF NOT EXISTS idx_workspace_id_on_af_collab ON af_collab (workspace_id);
CREATE INDEX IF NOT EXISTS idx_workspace_id_on_af_snapshot_meta ON af_snapshot_meta (workspace_id);
CREATE INDEX IF NOT EXISTS idx_workspace_id_on_af_snapshot_state ON af_snapshot_state (workspace_id);
CREATE INDEX IF NOT EXISTS idx_workspace_id_on_af_workspace_member ON af_workspace_member (workspace_id);

@ -0,0 +1,23 @@
CREATE TABLE IF NOT EXISTS af_workspace_ai_usage (
created_at DATE NOT NULL, -- day level of granularity
workspace_id UUID NOT NULL, -- workspace id for which the usage is being recorded
search_requests INT, -- number of search requests made
search_tokens_consumed BIGINT, -- number of tokens consumed for search requests
index_tokens_consumed BIGINT, -- number of tokens consumed for indexing documents
PRIMARY KEY (created_at, workspace_id)
);
-- migrate token usage data from af_workspace to af_workspace_ai_usage
INSERT INTO af_workspace_ai_usage (created_at, workspace_id, search_tokens_consumed, index_tokens_consumed)
SELECT
now()::date as created_at,
workspace_id,
search_token_usage as search_tokens_consumed,
index_token_usage as index_tokens_consumed
FROM af_workspace
WHERE search_token_usage IS NOT NULL
OR index_token_usage IS NOT NULL;
-- drop the redundant columns from af_workspace
ALTER TABLE af_workspace DROP COLUMN IF EXISTS search_token_usage;
ALTER TABLE af_workspace DROP COLUMN IF EXISTS index_token_usage;

@ -0,0 +1,6 @@
ALTER TABLE af_published_collab ADD COLUMN view_id UUID NOT NULL DEFAULT gen_random_uuid();
ALTER TABLE af_published_collab DROP CONSTRAINT af_published_collab_pkey;
ALTER TABLE af_published_collab ADD PRIMARY KEY (workspace_id, view_id);
CREATE INDEX IF NOT EXISTS idx_workspace_id_on_af_published_collab ON af_published_collab (workspace_id);
CREATE INDEX IF NOT EXISTS idx_published_by_on_af_published_collab ON af_published_collab (published_by);

@ -0,0 +1 @@
ALTER TABLE af_published_collab RENAME COLUMN doc_name TO publish_name;

@ -0,0 +1,6 @@
-- Add a unique constraint on publish_name
ALTER TABLE public.af_published_collab
ADD CONSTRAINT unique_publish_name UNIQUE (publish_name);
-- Add an index on publish_name
CREATE INDEX idx_publish_name ON public.af_published_collab (publish_name);

@ -0,0 +1,7 @@
-- Drop the existing unique constraint on publish_name
ALTER TABLE public.af_published_collab
DROP CONSTRAINT unique_publish_name;
-- Add a new unique constraint for the combination of publish_name and workspace_id
ALTER TABLE public.af_published_collab
ADD CONSTRAINT unique_workspace_id_publish_name UNIQUE (workspace_id, publish_name);

@ -0,0 +1,9 @@
-- Update existing null values to ensure no nulls are present before adding NOT NULL constraint
UPDATE public.af_workspace
SET publish_namespace = uuid_generate_v4()::text
WHERE publish_namespace IS NULL;
-- Alter the column to set NOT NULL constraint and a default value
ALTER TABLE public.af_workspace
ALTER COLUMN publish_namespace SET NOT NULL,
ALTER COLUMN publish_namespace SET DEFAULT uuid_generate_v4()::text;

@ -0,0 +1,3 @@
ALTER TABLE public.af_workspace_member
ADD CONSTRAINT af_workspace_member_uid_fkey
FOREIGN KEY (uid) REFERENCES af_user(uid) ON DELETE CASCADE;

@ -0,0 +1,37 @@
-- Bootstrap Supabase-compatible roles and users needed by GoTrue.
-- Create the anon and authenticated roles if they don't exist
CREATE OR REPLACE FUNCTION create_roles(roles text []) RETURNS void LANGUAGE plpgsql AS $$
DECLARE role_name text;
BEGIN
    FOREACH role_name IN ARRAY roles LOOP
        IF NOT EXISTS (
            SELECT 1
            FROM pg_roles
            WHERE rolname = role_name
        ) THEN
            -- format('%I') quotes the identifier, so an unusual role name can
            -- neither break the statement nor inject extra SQL (the previous
            -- string concatenation interpolated it unquoted).
            EXECUTE format('CREATE ROLE %I', role_name);
        END IF;
    END LOOP;
END;
$$;
SELECT create_roles(ARRAY ['anon', 'authenticated']);
-- Create supabase_admin user if it does not exist
DO $$ BEGIN IF NOT EXISTS (
    SELECT
    FROM pg_catalog.pg_roles
    WHERE rolname = 'supabase_admin'
) THEN CREATE USER supabase_admin LOGIN CREATEROLE CREATEDB REPLICATION BYPASSRLS;
END IF;
END $$;
-- Create supabase_auth_admin user if it does not exist
DO $$ BEGIN IF NOT EXISTS (
    SELECT
    FROM pg_catalog.pg_roles
    WHERE rolname = 'supabase_auth_admin'
    -- SECURITY(review): hard-coded password below — rotate it after first boot
    -- or supply it from a deployment secret instead of committing it to VCS.
) THEN CREATE USER supabase_auth_admin BYPASSRLS NOINHERIT CREATEROLE LOGIN NOREPLICATION PASSWORD 'root';
END IF;
END $$;
-- Create auth schema if it does not exist
CREATE SCHEMA IF NOT EXISTS auth AUTHORIZATION supabase_auth_admin;
-- Grant permissions
GRANT CREATE ON DATABASE postgres TO supabase_auth_admin;
-- Set search_path for supabase_auth_admin
ALTER USER supabase_auth_admin SET search_path = 'auth';

@ -0,0 +1,187 @@
# Minimal nginx configuration for AppFlowy-Cloud
# Self Hosted AppFlowy Cloud users should alter this file to suit their needs
events {
    worker_connections 1024;
}

http {
    # docker dns resolver
    resolver 127.0.0.11 valid=10s;

    # Map the client's Upgrade header to the Connection header value that should
    # be forwarded upstream: "upgrade" during a WebSocket handshake, "close"
    # for plain requests.
    map $http_upgrade $connection_upgrade {
        default upgrade;
        '' close;
    }

    server {
        listen 8080;
        # https://github.com/nginxinc/nginx-prometheus-exporter
        location = /stub_status {
            stub_status;
        }
    }

    server {
        ssl_certificate /etc/nginx/ssl/certificate.crt;
        ssl_certificate_key /etc/nginx/ssl/private_key.key;

        listen 80;
        listen 443 ssl;
        client_max_body_size 10M;
        underscores_in_headers on;

        # GoTrue
        location /gotrue/ {
            set $gotrue gotrue;
            proxy_pass http://$gotrue:9999;
            rewrite ^/gotrue(/.*)$ $1 break;
            # Allow headers like redirect_to to be handed over to the gotrue
            # for correct redirecting
            proxy_set_header Host $http_host;
            proxy_pass_request_headers on;
        }

        # WebSocket
        location /ws {
            set $appflowy_cloud appflowy_cloud;
            proxy_pass http://$appflowy_cloud:8000;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            # Use the map defined above (previously a hard-coded "Upgrade"),
            # so non-upgrade requests send "Connection: close" upstream.
            proxy_set_header Connection $connection_upgrade;
            proxy_set_header Host $host;
            proxy_read_timeout 86400;
        }

        # AppFlowy-Cloud
        # created a separate location block for handling CORS preflight (OPTIONS) requests specifically for the /api endpoint.
        location = /api/options {
            if ($http_origin ~* (http://127.0.0.1:8000)) {
                add_header 'Access-Control-Allow-Origin' $http_origin;
            }
            add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, PUT, DELETE, PATCH';
            add_header 'Access-Control-Allow-Headers' 'Content-Type, Authorization, Accept, Client-Version';
            add_header 'Access-Control-Max-Age' 3600;
            add_header 'Content-Type' 'text/plain; charset=utf-8';
            add_header 'Content-Length' 0;
            return 204;
        }

        # Streaming AI chat: buffering disabled so tokens reach the client as
        # they are produced; long timeouts for slow generations.
        location /api/chat {
            set $appflowy_cloud appflowy_cloud;
            proxy_pass http://$appflowy_cloud:8000;
            proxy_http_version 1.1;
            proxy_set_header Connection "";
            chunked_transfer_encoding on;
            proxy_buffering off;
            proxy_cache off;
            proxy_read_timeout 600s;
            proxy_connect_timeout 600s;
            proxy_send_timeout 600s;
        }

        location /api {
            set $appflowy_cloud appflowy_cloud;
            proxy_pass http://$appflowy_cloud:8000;
            proxy_set_header X-Request-Id $request_id;
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;

            # Set CORS headers for other requests
            if ($http_origin ~* (http://127.0.0.1:8000)) {
                add_header 'Access-Control-Allow-Origin' $http_origin always;
            }
            add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, PATCH' always;
            add_header 'Access-Control-Allow-Headers' 'Content-Type, Authorization, Accept, Client-Version' always;
            add_header 'Access-Control-Max-Age' 3600 always;

            # Publish uploads can be large; stream them upstream unbuffered.
            location ~* ^/api/workspace/([a-zA-Z0-9_-]+)/publish$ {
                set $appflowy_cloud appflowy_cloud;
                proxy_pass http://$appflowy_cloud:8000;
                proxy_request_buffering off;
                client_max_body_size 256M;
            }
        }

        # AppFlowy AI
        location /ai {
            set $ai ai;
            proxy_pass http://$ai:5001;
            proxy_set_header Host $host;
            proxy_pass_request_headers on;
        }

        # Minio Web UI
        # Derived from: https://min.io/docs/minio/linux/integrations/setup-nginx-proxy-with-minio.html
        # Optional module; comment this section out if you did not deploy minio in docker-compose.yml
        location /minio/ {
            set $minio minio;
            proxy_pass http://$minio:9001;
            rewrite ^/minio/(.*) /$1 break;
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-NginX-Proxy true;

            ## This is necessary to pass the correct IP to be hashed
            real_ip_header X-Real-IP;
            proxy_connect_timeout 300;

            ## To support websockets in MinIO versions released after January 2023
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            # Some environments may encounter CORS errors (Kubernetes + Nginx Ingress)
            # Uncomment the following line to set the Origin request to an empty string
            # proxy_set_header Origin '';

            chunked_transfer_encoding off;
        }

        # PgAdmin
        # Optional module; comment this section out if you did not deploy pgadmin in docker-compose.yml
        location /pgadmin/ {
            set $pgadmin pgadmin;
            proxy_pass http://$pgadmin:80;
            proxy_set_header X-Script-Name /pgadmin;
            proxy_set_header X-Scheme $scheme;
            proxy_set_header Host $host;
            proxy_redirect off;
        }

        # Portainer
        # Optional module; comment this section out if you did not deploy portainer in docker-compose.yml
        location /portainer/ {
            set $portainer portainer;
            proxy_pass http://$portainer:9000;
            rewrite ^/portainer/(.*) /$1 break;
        }

        # Admin Frontend
        # Optional module; comment this section out if you did not deploy admin_frontend in docker-compose.yml
        location / {
            set $admin_frontend admin_frontend;
            proxy_pass http://$admin_frontend:3000;
            proxy_set_header X-Scheme $scheme;
            proxy_set_header Host $host;
        }
    }
}

@ -0,0 +1,31 @@
-----BEGIN CERTIFICATE-----
MIIFRDCCAywCCQDXwkFioxoJ2TANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQGEwJV
UzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJU3Vubnl2YWxlMRYwFAYD
VQQKDA1BcHBGbG93eSxJbmMuMRQwEgYDVQQDDAthcHBmbG93eS5pbzAeFw0yMzAz
MTUwMDUxNDVaFw0yNDAzMTQwMDUxNDVaMGQxCzAJBgNVBAYTAlVTMRMwEQYDVQQI
DApDYWxpZm9ybmlhMRIwEAYDVQQHDAlTdW5ueXZhbGUxFjAUBgNVBAoMDUFwcEZs
b3d5LEluYy4xFDASBgNVBAMMC2FwcGZsb3d5LmlvMIICIjANBgkqhkiG9w0BAQEF
AAOCAg8AMIICCgKCAgEA09v/ouq4r7+oLOWqVscYpW5QRLR5O6OYZprocIARAtWA
qBkywhPku/SZq27dtPD7Pi3soSPkMhYDFALai4idgELCFxxkTuHWNm3J+Y8PcMq2
RX325/pQVpOMTkChqaUzh93ynYqv89x3lT9z4saknBde/WO2yOJ6sfED9w+ezYgm
34LV5Z0cofQTDEiTX58KV3MmG5hRMdBwCaDg1jUb3jKr5lBrF4+EHbAN5PWH282V
JdyOTvZp/CF5TcnAMONkYENjURpnSXJes34ufYHkmr0eDa+2pfc3TI5wlB3tPQyN
p+B5TsDCDofv0Zme5Ur42TWcwsG0WRvtDw2KZ98wBtGaIv8UEQjXipNQVzeCv5Yv
zeykjdDhOJ/OZFzsm2vtl55t52M43xYMo3QPmjHMiVYz9KVvPqrVo+O5PI7B7uwY
JxWQIJosUa6AeKfkCAQd8mlNqYylqV8Utqs8b4zee8Vf7hzaCYNKxyDvqsd3yDwE
umh5zVuxJitLchDFT4mv1v3yLHocusV2lwjfEk66R/o5BBDdviycxeAM5Q1lyi7M
RywHAt/eVQNaBq0HSa2vbIm5yTZNQYwuhnNgv035hf8vpu+tFOGepTqgy/CrYpPm
VnsClJVrmxd4LfUZ0aZODiKCVx6psfeBvTXu1r7/SjsmbvYE4ubfM66optyFc3cC
AwEAATANBgkqhkiG9w0BAQsFAAOCAgEAfhSUTP91rBP+8zvsoxomKdeClVFURczr
HO/VuXVmBoKLASFqFcT84usRhb5T6XB7o2GcCYSo0VFJw99UM6nLsZ7c0MKaAjT6
/9VeyLtDfhCDkffGGxpeYhme+0PY8TXIU5aO0ZhQwzXUOiC7t3Ac3AzHTPGrGqOK
aAGhMo7V2bQXcNR6NFhsUlJOtVE59MxL1K5Ug1oMn/H+NUF6/st+KzohruQJSSWG
GcbqVor0zZbFJSvRJ5P3ngw2cg2SIj9w6RwUWMp+a5kOx10fOYrQEHGyTHFlVDy0
yOCU4eVO3EVTm7Se1XVwmG3kNKQaLFJf1voMuYD2sFbZ0nhGJDSZOcGUrchUXPQB
C9MwU52OeNm5VwE/41wLoFvOkJ/I/Ak7vccl1YJXpefa6qjNOFm5X0jA7D4egDfC
IVs5m30qa6Birx0xS6RUuuvxLJyNzgLSzsC1eFwjR2uwIUrGpYo3YI4+bMxp2Wnk
6qtm5G8D1giWg6z0RLw+GSj4QfcJEBP+zyiH/MdB5te9kXVYLKyS+DGgTTatLi1l
MBBK5b7dvfwo08J/sksK+mPHHBsV9TPAkqMp9vuZw25pRAEnvjEOUPfi3X9EZDw7
A3LElN9KDks3IioioAOm0vPvHrlfziSljt5IMkuZLT3lUe6B3cs69caLO0S/ZDTz
yqGwLIqHYos=
-----END CERTIFICATE-----

@ -0,0 +1,52 @@
-----BEGIN PRIVATE KEY-----
MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDT2/+i6rivv6gs
5apWxxilblBEtHk7o5hmmuhwgBEC1YCoGTLCE+S79Jmrbt208Ps+LeyhI+QyFgMU
AtqLiJ2AQsIXHGRO4dY2bcn5jw9wyrZFffbn+lBWk4xOQKGppTOH3fKdiq/z3HeV
P3PixqScF179Y7bI4nqx8QP3D57NiCbfgtXlnRyh9BMMSJNfnwpXcyYbmFEx0HAJ
oODWNRveMqvmUGsXj4QdsA3k9YfbzZUl3I5O9mn8IXlNycAw42RgQ2NRGmdJcl6z
fi59geSavR4Nr7al9zdMjnCUHe09DI2n4HlOwMIOh+/RmZ7lSvjZNZzCwbRZG+0P
DYpn3zAG0Zoi/xQRCNeKk1BXN4K/li/N7KSN0OE4n85kXOyba+2Xnm3nYzjfFgyj
dA+aMcyJVjP0pW8+qtWj47k8jsHu7BgnFZAgmixRroB4p+QIBB3yaU2pjKWpXxS2
qzxvjN57xV/uHNoJg0rHIO+qx3fIPAS6aHnNW7EmK0tyEMVPia/W/fIsehy6xXaX
CN8STrpH+jkEEN2+LJzF4AzlDWXKLsxHLAcC395VA1oGrQdJra9sibnJNk1BjC6G
c2C/TfmF/y+m760U4Z6lOqDL8Ktik+ZWewKUlWubF3gt9RnRpk4OIoJXHqmx94G9
Ne7Wvv9KOyZu9gTi5t8zrqim3IVzdwIDAQABAoICAQC7fCxdc5TfSx+8I767rtO7
ysTUGFZVFfCPlLTwohTryh9iI3KM1+gLAWpgkOs47i2ZGDEZZVbTkDFHK0NWSh7/
25RBuYl3WVolrsEXzaefbHUjSFcRca5Y/5ghxAaMx7qzmRHUo2AU0d0twgp+/MW9
sN0KJo0id3KXODAHGtaxErU8BV/fJEurcwDMVQm+jFMtkqR9tSzdhZUwoCN4zWUN
HRCM8EvlfMcxMpUJMtP5C5Ta/bUeYejnDIR593nSidlRazFgG5qeH8140Mi5nxK8
cXJAMGjVtNJGOKOeIGHLLenKT9dqfyD8lQYBGg7I4bEZH93LaHp+hT0jnhsG0zd+
pEvXCX9IEIzajKQPv6wdLup+p2hZBTq7SddILliM28y0vbNXhmNFBsrLnWTMeGpl
n0VoGytmFO/b09S2yd0glrBZZyFmnPzi0dVk341mFaFFcXot/xum73FWt2Cy77Vp
lGfFlI4TzQbmJKWYNPJl1BiZWOKSXWtNn7lVACBJfOjoifMHdzkjXSKxv21Nompy
Y08Bl2wSfM3plQ6kbmu7KsGuVE2OH1oBbYYxjwFWQAl/ISeEJztqWeLzTYvTFYCl
gA2NC1MJHdJZWU71m0XQN7Cs8mQ0lQhlCTm3Y7Mlbm8RCgJvmfPECyWX1KbeKzgq
AQ4tUotGeuMUX6KIoNX7+QKCAQEA9/jTGRuXu7zsDae9VAZHluZ55Jc1XRUPYgIr
AMLn3J88RD8XOaW9ZiIt+btfCx3WmxwFwLjD4g15QgwS1LoRGYzNjxCv04naplOT
pwfVT3Ry5BbfAMbx1GrGyNH9lh479aozvtXudL5QzirTXDSVlwQ4K3VvL4XHNtOh
3ZiFNReUKdu2fTPPPUc3vs2XAG3fWb8G7KWthbbdHAhl//gyZI6iAR2Fc3IHZGc4
+Xuqmlvccx3+ZWhksf4uOfzEluruYlv8AgczxtKB4tWsoYJbU71zdYHAtPaYua8w
6x9urD7vLNQ7TpagCD8q8V5jX+XG53HPBMJ5hRwy35bW0SJLEwKCAQEA2rfgByLa
00ghuyrF99ynwt5lZGk+WYR74xLn21PfP4vwWsdEnUNCDzBaZsKFOMsx5nctYVKk
ZfGCYRaLho4GNifkf6yy2QIMCq1bayYJlISDQXgjgVpZXjsnOotYjPDglGSIOhij
XaLSqGbvDt/VtIPwTeNHfSnsRQzzd0XxclbP96mIxLrcjvOUlWyavUVh58dkv/pi
R+7dE9b46zyCxc9OuTdZ9RAc6Op3DsHXk+Yuwrwh5r2rBEQxkqQ4//gQJKjGtfFS
YwI9bmsZnTYoalTtVjaDZ1mDlYbGgHzecplw59lQluuNJMFTEjpkK1E67oAFbkry
wRxnUkSYRq6+jQKCAQEA1BGI98ARVA2OE1+RG3sDXppdRJHMoX6RWVBhVpVZleTY
tcT/J94GzIIOr7T+45LxJlYg1WEupPTA7ytEL4mxdhhk9CVhOZh71iND82VPmFQO
reKhdRivWOq4dqagKPJSdRbKijqLZGwezzLw77pI9I43O3ODUzEl3k2/8LOvuGgh
3mp49zqH0fBGTHem3Eca7LXiRiCq9eAd2QuVsAOjlTwmcK2+o6yxhbyBjVul270U
G59bIX7WHyMyhYUW27qvhI8GRvXB4hfF3SjAKqBBWqx7QdNl612535NkUrDfBZAN
HFmlHuDSnDrpjuMaOblZEjbSxU9MffpPx8hIjzK04QKCAQEAtX2LCqDjkBr00okF
yU1ycAN3g0DJmiKTYrPXbWpFgEew5MMhrpWXBV+MRGT5g00pVSJjp7SZ8nXbSJEa
qkbD5MBpnYBC0EwgjeOYTms729+xwuvcGoRMUCMpxCzJB/sBgGGDoSG8vgBUaaUw
jdkzTh2FlDwaoEPfaNT8WmbRmZ1r6QjnEsg0KPL6wptiM9iVC22rrpooX6RYExR5
bUnDAj2qB4tkvDPoqWWV8crsBjAlcTYHs56DgIDN2e8n1U+UpbbfXS6ovLupGi0J
DilYlBNw9e86TtI6nCNAKHJ1bAbjZ6AufW1sq6k4M5H8eO1ox2u4FfNfSNs26U8+
RLjQKQKCAQAfd+u6/EEA4bMzVuE7SWpYD87eQ4edttjda4tlJBO48KFtqZ2bhIKd
sAEdw3txbcHiPazFQlNgKBfxq9JhGX8Dga/Wx/s/d5eTafqXfQk4hfHHqoYY4D+H
agDdP3QPVnfBueTcdKnGuD5Ex1pK4pnmnRkKQd1XOlV2w49PeoA1HG2PvsvSyfoa
yxfTIRsX78I4wClQywEnyGWKvsOGSP/zHHfKaCoic/KwDx5SVgeZCLgSoDHWdpuh
Vu5JGnIFQel7Y6+Zd92ubZ1vFUW7hW0JPHszSGqg2aE1m5RXsIanhTUCQusR3Pj+
Oi+igzFlcelWDZ/eQ8CpDkSpAqtwwx97
-----END PRIVATE KEY-----

@ -0,0 +1,171 @@
# Essential services for AppFlowy Cloud
# NOTE(review): the top-level `version` key is obsolete in Compose V2 and is
# ignored (with a warning) by current `docker compose` releases; kept only for
# compatibility with older docker-compose binaries.
version: '3'
services:
  nginx:
    container_name: appflowy_nginx #ADDED
    restart: on-failure
    image: nginx:1.24.0 #CHANGED
    ports:
      # Quoted so YAML always reads the host:container pairs as strings.
      - "80:80" # Disable this if you are using TLS
      - "443:443"
    volumes:
      - ./configs/nginx/nginx.conf:/etc/nginx/nginx.conf #CHANGED
      - ./configs/nginx/ssl/certificate.crt:/etc/nginx/ssl/certificate.crt #CHANGED
      - ./configs/nginx/ssl/private_key.key:/etc/nginx/ssl/private_key.key #CHANGED

  # You do not need this if you have configured to use your own s3 file storage
  minio:
    container_name: appflowy_minio #ADDED
    restart: on-failure
    image: minio/minio:RELEASE.2024-07-16T23-46-41Z #CHANGED not necessary
    environment:
      - MINIO_BROWSER_REDIRECT_URL=http://localhost/minio
      - MINIO_ROOT_USER=${APPFLOWY_S3_ACCESS_KEY:-minioadmin}
      - MINIO_ROOT_PASSWORD=${APPFLOWY_S3_SECRET_KEY:-minioadmin}
    command: server /data --console-address ":9001"
    volumes:
      - ./data/minio_data:/data #CHANGED

  postgres:
    container_name: appflowy_postgres #ADDED
    restart: on-failure
    image: pgvector/pgvector:0.7.2-pg16 #ADDED
    # build:
    #   context: ./postgres
    #   dockerfile: postgres.Dockerfile
    environment:
      - POSTGRES_USER=${POSTGRES_USER:-postgres}
      - POSTGRES_DB=${POSTGRES_DB:-postgres}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-password}
      - POSTGRES_HOST=${POSTGRES_HOST:-postgres}
    volumes:
      # Init scripts run only on first start of an empty data directory.
      - ./configs/migrations/before:/docker-entrypoint-initdb.d #CHANGED
      - ./data/postgres_data:/var/lib/postgresql/data #CHANGED

  redis:
    container_name: appflowy_redis #ADDED
    restart: on-failure
    image: redis:7.2.5 #CHANGED

  gotrue:
    container_name: appflowy_gotrue #ADDED
    restart: on-failure
    # build:
    #   context: .
    #   dockerfile: docker/gotrue.Dockerfile
    image: appflowyinc/gotrue:0.5.32 #CHANGED
    environment:
      # There are a lot of options to configure GoTrue. You can reference the example config:
      # https://github.com/supabase/gotrue/blob/master/example.env
      - GOTRUE_SITE_URL=appflowy-flutter:// # redirected to AppFlowy application
      - URI_ALLOW_LIST=* # adjust / restrict as necessary
      - GOTRUE_JWT_SECRET=${GOTRUE_JWT_SECRET} # authentication secret
      - GOTRUE_JWT_EXP=${GOTRUE_JWT_EXP}
      - GOTRUE_DB_DRIVER=postgres
      - API_EXTERNAL_URL=${API_EXTERNAL_URL}
      # NOTE(review): GOTRUE_DATABASE_URL must be supplied via .env — confirm it
      # is defined there (the template only shows APPFLOWY_DATABASE_URL).
      - DATABASE_URL=${GOTRUE_DATABASE_URL}
      - PORT=9999
      - GOTRUE_SMTP_HOST=${GOTRUE_SMTP_HOST} # e.g. smtp.gmail.com
      - GOTRUE_SMTP_PORT=${GOTRUE_SMTP_PORT} # e.g. 465
      - GOTRUE_SMTP_USER=${GOTRUE_SMTP_USER} # email sender, e.g. noreply@appflowy.io
      - GOTRUE_SMTP_PASS=${GOTRUE_SMTP_PASS} # email password
      - GOTRUE_MAILER_URLPATHS_CONFIRMATION=/gotrue/verify
      - GOTRUE_MAILER_URLPATHS_INVITE=/gotrue/verify
      - GOTRUE_MAILER_URLPATHS_RECOVERY=/gotrue/verify
      - GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE=/gotrue/verify
      - GOTRUE_SMTP_ADMIN_EMAIL=${GOTRUE_SMTP_ADMIN_EMAIL} # email with admin privileges e.g. internal@appflowy.io
      - GOTRUE_SMTP_MAX_FREQUENCY=${GOTRUE_SMTP_MAX_FREQUENCY:-1ns} # set to 1ns for running tests
      - GOTRUE_RATE_LIMIT_EMAIL_SENT=${GOTRUE_RATE_LIMIT_EMAIL_SENT:-100} # number of email sendable per minute
      - GOTRUE_MAILER_AUTOCONFIRM=${GOTRUE_MAILER_AUTOCONFIRM:-false} # change this to true to skip email confirmation
      # Google OAuth config
      - GOTRUE_EXTERNAL_GOOGLE_ENABLED=${GOTRUE_EXTERNAL_GOOGLE_ENABLED}
      - GOTRUE_EXTERNAL_GOOGLE_CLIENT_ID=${GOTRUE_EXTERNAL_GOOGLE_CLIENT_ID}
      - GOTRUE_EXTERNAL_GOOGLE_SECRET=${GOTRUE_EXTERNAL_GOOGLE_SECRET}
      - GOTRUE_EXTERNAL_GOOGLE_REDIRECT_URI=${GOTRUE_EXTERNAL_GOOGLE_REDIRECT_URI}
      # GITHUB OAuth config
      - GOTRUE_EXTERNAL_GITHUB_ENABLED=${GOTRUE_EXTERNAL_GITHUB_ENABLED}
      - GOTRUE_EXTERNAL_GITHUB_CLIENT_ID=${GOTRUE_EXTERNAL_GITHUB_CLIENT_ID}
      - GOTRUE_EXTERNAL_GITHUB_SECRET=${GOTRUE_EXTERNAL_GITHUB_SECRET}
      - GOTRUE_EXTERNAL_GITHUB_REDIRECT_URI=${GOTRUE_EXTERNAL_GITHUB_REDIRECT_URI}
      # Discord OAuth config
      - GOTRUE_EXTERNAL_DISCORD_ENABLED=${GOTRUE_EXTERNAL_DISCORD_ENABLED}
      - GOTRUE_EXTERNAL_DISCORD_CLIENT_ID=${GOTRUE_EXTERNAL_DISCORD_CLIENT_ID}
      - GOTRUE_EXTERNAL_DISCORD_SECRET=${GOTRUE_EXTERNAL_DISCORD_SECRET}
      - GOTRUE_EXTERNAL_DISCORD_REDIRECT_URI=${GOTRUE_EXTERNAL_DISCORD_REDIRECT_URI}

  appflowy_cloud:
    container_name: appflowy_cloud #ADDED
    restart: on-failure
    environment:
      - RUST_LOG=${RUST_LOG:-info}
      - APPFLOWY_ENVIRONMENT=production
      - APPFLOWY_DATABASE_URL=${APPFLOWY_DATABASE_URL}
      - APPFLOWY_REDIS_URI=redis://redis:6379
      - APPFLOWY_GOTRUE_JWT_SECRET=${GOTRUE_JWT_SECRET}
      - APPFLOWY_GOTRUE_JWT_EXP=${GOTRUE_JWT_EXP}
      - APPFLOWY_GOTRUE_BASE_URL=${APPFLOWY_GOTRUE_BASE_URL}
      - APPFLOWY_GOTRUE_EXT_URL=${API_EXTERNAL_URL}
      - APPFLOWY_GOTRUE_ADMIN_EMAIL=${GOTRUE_ADMIN_EMAIL}
      - APPFLOWY_GOTRUE_ADMIN_PASSWORD=${GOTRUE_ADMIN_PASSWORD}
      - APPFLOWY_S3_USE_MINIO=${APPFLOWY_S3_USE_MINIO}
      - APPFLOWY_S3_MINIO_URL=${APPFLOWY_S3_MINIO_URL}
      - APPFLOWY_S3_ACCESS_KEY=${APPFLOWY_S3_ACCESS_KEY}
      - APPFLOWY_S3_SECRET_KEY=${APPFLOWY_S3_SECRET_KEY}
      - APPFLOWY_S3_BUCKET=${APPFLOWY_S3_BUCKET}
      - APPFLOWY_S3_REGION=${APPFLOWY_S3_REGION}
      - APPFLOWY_MAILER_SMTP_HOST=${APPFLOWY_MAILER_SMTP_HOST}
      - APPFLOWY_MAILER_SMTP_PORT=${APPFLOWY_MAILER_SMTP_PORT}
      - APPFLOWY_MAILER_SMTP_USERNAME=${APPFLOWY_MAILER_SMTP_USERNAME}
      - APPFLOWY_MAILER_SMTP_PASSWORD=${APPFLOWY_MAILER_SMTP_PASSWORD}
      - APPFLOWY_ACCESS_CONTROL=${APPFLOWY_ACCESS_CONTROL}
      - APPFLOWY_DATABASE_MAX_CONNECTIONS=${APPFLOWY_DATABASE_MAX_CONNECTIONS}
      - APPFLOWY_AI_SERVER_HOST=${APPFLOWY_AI_SERVER_HOST}
      - APPFLOWY_AI_SERVER_PORT=${APPFLOWY_AI_SERVER_PORT}
      - APPFLOWY_OPENAI_API_KEY=${APPFLOWY_OPENAI_API_KEY}
    # build:
    #   context: .
    #   dockerfile: Dockerfile
    #   args:
    #     FEATURES: ""
    image: appflowyinc/appflowy_cloud:0.5.32 #CHANGED

  admin_frontend:
    container_name: appflowy_admin #ADDED
    restart: on-failure
    # build:
    #   context: .
    #   dockerfile: ./admin_frontend/Dockerfile
    image: appflowyinc/admin_frontend:0.5.32 #CHANGED
    environment:
      - RUST_LOG=${RUST_LOG:-info}
      - ADMIN_FRONTEND_REDIS_URL=${ADMIN_FRONTEND_REDIS_URL:-redis://redis:6379}
      - ADMIN_FRONTEND_GOTRUE_URL=${ADMIN_FRONTEND_GOTRUE_URL:-http://gotrue:9999}
      - ADMIN_FRONTEND_APPFLOWY_CLOUD_URL=${ADMIN_FRONTEND_APPFLOWY_CLOUD_URL:-http://appflowy_cloud:8000}

  # The AI service is disabled; the nginx /ai location and the
  # APPFLOWY_AI_SERVER_* variables above assume it when re-enabled.
  # ai:
  #   container_name: appflowy_ai #ADDED
  #   restart: on-failure
  #   image: appflowyinc/appflowy_ai:0.5.16 #CHANGED
  #   environment:
  #     - OPENAI_API_KEY=${APPFLOWY_AI_OPENAI_API_KEY}
  #     - APPFLOWY_AI_SERVER_PORT=${APPFLOWY_AI_SERVER_PORT}
  #     - APPFLOWY_AI_DATABASE_URL=${APPFLOWY_AI_DATABASE_URL}

  appflowy_history:
    container_name: appflowy_history #ADDED
    restart: on-failure
    image: appflowyinc/appflowy_history:0.5.32 #CHANGED
    # build:
    #   context: .
    #   dockerfile: ./services/appflowy-history/Dockerfile
    environment:
      - RUST_LOG=${RUST_LOG:-info}
      - APPFLOWY_HISTORY_REDIS_URL=redis://redis:6379
      - APPFLOWY_HISTORY_ENVIRONMENT=production
      - APPFLOWY_HISTORY_DATABASE_URL=${APPFLOWY_HISTORY_DATABASE_URL}

# Named volumes unused: postgres and minio persist via ./data bind mounts above.
#volumes:
#  postgres_data:
#  minio_data:
Loading…
Cancel
Save