Compare commits

...

71 Commits

Author SHA1 Message Date
dr-carrot
00e18a272a refactor: removed unnecessary import 2024-12-04 18:23:53 -05:00
dr-carrot
6957b7606e fix: null contraint with mediaAddedAt; fix psql col type 2024-12-04 18:21:20 -05:00
dr-carrot
0154e6b538 fix: incorrect current date function 2024-12-04 15:31:30 -05:00
dr-carrot
3d0166aaef fix: fix psql issue in user page; fix tiny psql error when selecting by empty list 2024-11-25 15:40:32 -05:00
dr-carrot
2fc8996606 Merge branch 'develop' of https://github.com/Fallenbagel/jellyseerr into v1.7.0/postgresql 2024-11-25 12:54:42 -05:00
dr-carrot
830f431e01 docs: update psql docs 2024-11-04 22:09:07 -05:00
dr-carrot
94efdf7a18 fix: fix issue with cypress tests 2024-11-04 21:04:18 -05:00
dr-carrot
ab5cdf5464 feat: migrate blacklist 2024-11-04 18:21:49 -05:00
dr-carrot
d3805d99e8 refactor: clean up code from cr comments 2024-11-04 18:10:43 -05:00
dr-carrot
7a5ee18e2c docs: update docs to add psql set up info 2024-11-04 18:09:39 -05:00
dr-carrot
96591a9ddd chore(pnpm-lock.yaml): updated pnpm-lock 2024-11-04 18:04:40 -05:00
dr-carrot
46c3af115a chore: merge from dev 2024-11-04 17:35:05 -05:00
dr-carrot
4e63cee12b Merge pull request #3 from dr-carrot/v1.7.0/postgresql-dev-merge
V1.7.0/postgresql dev merge
2024-08-18 23:26:54 -04:00
dr-carrot
5ee29823c6 fix: update pnpm lock with pg package 2024-08-18 21:07:17 -04:00
dr-carrot
2add7af5ec chore: merge from dev 2024-08-18 20:42:57 -04:00
dr-carrot
3b12c98242 chore: revverted ts commit 2024-08-18 20:32:54 -04:00
dr-carrot
bd7339a105 chore: restored CHANGELOG.md 2024-06-25 16:43:45 -04:00
dr-carrot
66e308ba1d Merge branch 'develop' of https://github.com/Fallenbagel/jellyseerr into v1.7.0/postgresql
# Conflicts:
#	README.md
#	yarn.lock
2024-06-25 16:35:43 -04:00
dr-carrot
357b927ab3 style: fixed prettier in docker-compose.pastgres.yaml 2024-06-21 13:38:55 -04:00
dr-carrot
f8926fa86c fix: changed version to 0.1.0 to remove ui notification 2024-06-21 13:15:18 -04:00
dr-carrot
e2992fea9c chore: merge from develop 2024-06-21 13:01:09 -04:00
dr-carrot
2b0d497370 fix: fixed indentation in psql docker-compose 2024-05-29 18:35:10 -04:00
dr-carrot
028012185c Merge branch 'develop' of https://github.com/Fallenbagel/jellyseerr into v1.7.0/postgresql 2024-05-29 18:10:29 -04:00
dr-carrot
94a9806089 fix: switch timestamp to include timezone 2024-05-29 18:04:35 -04:00
dr-carrot
caaed7c8b8 fix: updated ts schema to align with change to migration 2024-05-08 17:53:19 -04:00
dr-carrot
29452648e6 feat: created docker-compose postgres file 2024-05-08 17:51:15 -04:00
dr-carrot
f6b5a6fe9f Merge branch 'develop' of https://github.com/Fallenbagel/jellyseerr into v1.7.0/postgresql 2024-05-08 17:11:54 -04:00
dr-carrot
cf4e3fd579 Merge branch 'develop' of https://github.com/Fallenbagel/jellyseerr into v1.7.0/postgresql 2024-04-23 18:06:12 -04:00
dr-carrot
326d2cb4ca Merge remote-tracking branch 'og/develop' into v1.7.0/postgresql 2024-04-04 11:37:53 -04:00
dr-carrot
ff7bb884ae docs: added database option to bug template 2024-03-03 17:30:50 -05:00
dr-carrot
ef78fdd534 docs: added documentation for migration to postgres 2024-03-03 17:27:55 -05:00
dr-carrot
637e7dbd8e Merge remote-tracking branch 'og/develop' into v1.7.0/postgresql 2024-03-03 17:24:16 -05:00
dr-carrot
3ebf47fe61 fix: ensure requests are not disassociated with media 2024-01-27 19:58:58 -05:00
dr-carrot
88b67686ff fix: remove eager load 2024-01-27 19:56:28 -05:00
dr-carrot
86444b80b9 chore: cleanup extra psql code 2024-01-27 19:29:41 -05:00
dr-carrot
5f7679982a fix: added not null to migration so typeorm doesn't delete ids 2024-01-27 19:08:47 -05:00
dr-carrot
610c372498 chore: finish log cleanup 2024-01-27 19:07:57 -05:00
dr-carrot
7aca0be41c chore: more log cleanup 2024-01-27 18:34:55 -05:00
dr-carrot
0bab6887b0 chore: mild log cleanup 2024-01-27 18:18:16 -05:00
dr-carrot
0d6a1f12fb chore: added some more+ additional debug sanity checks 2024-01-27 18:00:42 -05:00
dr-carrot
87c8444ec6 chore: added some more additional debug sanity checks 2024-01-27 17:15:55 -05:00
dr-carrot
f08c537cba chore: added some more debug sanity checks 2024-01-27 17:13:55 -05:00
dr-carrot
b1b4dd9cfc chore: added some debug sanity checks 2024-01-27 17:07:10 -05:00
dr-carrot
eb111ac1db fix: removed psql specific relation checks 2024-01-22 17:34:02 -05:00
dr-carrot
106cd195d4 fix: added default dates to postgres migration 2024-01-20 23:58:00 -05:00
dr-carrot
42ad4e0ae3 chore: cleanup logs 2024-01-20 17:50:56 -05:00
dr-carrot
501859207a chore: small tweaks for the datasource. Added docs for db setup 2024-01-20 17:50:04 -05:00
dr-carrot
8c7004c50d feat: added logging option to other datasources 2024-01-20 14:50:24 -05:00
dr-carrot
b594dec992 chore: relocate pushover sound migration 2024-01-20 14:49:23 -05:00
dr-carrot
e93ab06504 fix: issue with session migration 2024-01-20 14:48:47 -05:00
dr-carrot
0581d7b6ad fix: added option to log queries 2024-01-20 14:24:59 -05:00
dr-carrot
00c811d10d fix: added pushover sound migration tto initial migration 2024-01-19 19:17:07 -05:00
dr-carrot
14d3ec22b0 fix: access order 2024-01-19 18:31:31 -05:00
dr-carrot
ed57911c7c fix: restored workflow actions 2024-01-19 17:22:48 -05:00
dr-carrot
e6cc2c55a2 Merged changes from develop 2024-01-19 17:19:58 -05:00
dr-carrot
4d85f29843 feat(postgres and migrations): added migrations for postgres & imporved ssl for postgres config
#186
2024-01-19 16:58:46 -05:00
Ryan Algar
44aaca0fb2 feat(db): add flag to toggle TLS for Postgres 2023-12-16 10:37:45 -08:00
Ryan Algar
abd80c1fa8 fix: don't use SQLite idiom when using PgSQL 2023-11-14 16:03:40 -08:00
Ryan Algar
b6592bf9f7 test(ci): temporarily change CI for local repo 2023-11-14 15:46:11 -08:00
Ryan Algar
573b64f901 test(pgsql): disable root certificate verification 2023-11-14 15:40:35 -08:00
zackhow
b39a5a7d82 feat: support for postgresql 2023-11-14 15:39:23 -08:00
semantic-release-bot
325e2ed6d3 chore(release): 1.7.0 2023-09-14 00:44:40 +00:00
Fallenbagel
e7c11da52b Merge pull request #477 from Fallenbagel/develop
Merge develop into main
2023-09-14 05:41:57 +05:00
semantic-release-bot
5712e19804 chore(release): 1.6.0 2023-08-04 20:43:24 +00:00
fallenbagel
4b549763e5 Merge branch 'develop' 2023-08-05 01:22:19 +05:00
semantic-release-bot
24151d27f7 chore(release): 1.5.0 2023-04-20 02:05:25 +00:00
Fallenbagel
f3cc8cba0a Merge pull request #368 from Fallenbagel/develop
Merge 'develop' into main
2023-04-20 07:02:36 +05:00
semantic-release-bot
57e7d68092 chore(release): 1.4.1 2023-01-31 00:20:50 +00:00
Fallenbagel
d3622f7bb3 Merge pull request #316 from Fallenbagel/develop
Merge develop into main
2023-01-31 05:15:41 +05:00
semantic-release-bot
20c821e2eb chore(release): 1.4.0 2023-01-29 20:33:10 +00:00
Fallenbagel
7b82ced5e6 Merge pull request #312 from Fallenbagel/develop
Merge 'origin/develop' into main
2023-01-30 01:31:00 +05:00
54 changed files with 706 additions and 24 deletions

View File

@@ -55,6 +55,14 @@ body:
        - tablet
    validations:
      required: true
+  - type: dropdown
+    id: database
+    attributes:
+      options:
+        - SQLite (default)
+        - PostgreSQL
+      label: Database
+      description: Which database backend are you using?
  - type: input
    id: device
    attributes:

View File

@@ -0,0 +1,38 @@
---
version: '3.8'
services:
  jellyseerr:
    build:
      context: .
      dockerfile: Dockerfile.local
    ports:
      - '5055:5055'
    environment:
      DB_TYPE: 'postgres' # Which DB engine to use. The default is "sqlite". To use postgres, this needs to be set to "postgres"
      DB_HOST: 'postgres' # The host (url) of the database
      DB_PORT: '5432' # The port to connect to
      DB_USER: 'jellyseerr' # Username used to connect to the database
      DB_PASS: 'jellyseerr' # Password of the user used to connect to the database
      DB_NAME: 'jellyseerr' # The name of the database to connect to
      DB_LOG_QUERIES: 'false' # Whether to log the DB queries for debugging
      DB_USE_SSL: 'false' # Whether to enable ssl for database connection
    volumes:
      - .:/app:rw,cached
      - /app/node_modules
      - /app/.next
    depends_on:
      - postgres
    links:
      - postgres
  postgres:
    image: postgres
    environment:
      POSTGRES_USER: jellyseerr
      POSTGRES_PASSWORD: jellyseerr
      POSTGRES_DB: jellyseerr
    ports:
      - '5432:5432'
    volumes:
      - postgres:/var/lib/postgresql/data
volumes:
  postgres:

View File

@@ -17,6 +17,7 @@ Welcome to the Jellyseerr Documentation.
 - **Mobile-friendly design**, for when you need to approve requests on the go.
 - Granular permission system.
 - Localization into other languages.
+- Support for PostgreSQL and SQLite databases.
 - More features to come!

 ## Motivation

View File

@@ -0,0 +1,56 @@
---
title: Configuring the Database (Advanced)
description: Configure the database for Jellyseerr
sidebar_position: 2
---
# Configuring the Database
Jellyseerr supports SQLite and PostgreSQL. The database connection can be configured using the following environment variables:
## SQLite Options
```dotenv
DB_TYPE="sqlite" # Which DB engine to use, either "sqlite" or "postgres". The default is "sqlite".
CONFIG_DIRECTORY="config" # (optional) The path to the config directory where the db file is stored. The default is "config".
DB_LOG_QUERIES="false" # (optional) Whether to log the DB queries for debugging. The default is "false".
```
## PostgreSQL Options
```dotenv
DB_TYPE="postgres" # Which DB engine to use, either "sqlite" or "postgres". The default is "sqlite". To use postgres, this needs to be set to "postgres"
DB_HOST="localhost" # (optional) The host (url) of the database. The default is "localhost".
DB_PORT="5432" # (optional) The port to connect to. The default is "5432".
DB_USER= # (required) Username used to connect to the database
DB_PASS= # (required) Password of the user used to connect to the database
DB_NAME="jellyseerr" # (optional) The name of the database to connect to. The default is "jellyseerr".
DB_LOG_QUERIES="false" # (optional) Whether to log the DB queries for debugging. The default is "false".
```
### SSL configuration
The following options can be used to further configure ssl. Certificates can be provided as a string or a file path, with the string version taking precedence.
```dotenv
DB_USE_SSL="false" # (optional) Whether to enable ssl for database connection. This must be "true" to use the other ssl options. The default is "false".
DB_SSL_REJECT_UNAUTHORIZED="true" # (optional) Whether to reject ssl connections with unverifiable certificates i.e. self-signed certificates without providing the below settings. The default is "true".
DB_SSL_CA= # (optional) The CA certificate to verify the connection, provided as a string. The default is "".
DB_SSL_CA_FILE= # (optional) The path to a CA certificate to verify the connection. The default is "".
DB_SSL_KEY= # (optional) The private key for the connection in PEM format, provided as a string. The default is "".
DB_SSL_KEY_FILE= # (optinal) Path to the private key for the connection in PEM format. The default is "".
DB_SSL_CERT= # (optional) Certificate chain in pem format for the private key, provided as a string. The default is "".
DB_SSL_CERT_FILE= # (optional) Path to certificate chain in pem format for the private key. The default is "".
```
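For illustration, a minimal TypeScript sketch of that precedence, assuming the variable naming above; the actual resolution is done by the `stringOrReadFileFromEnv` helper added to `server/datasource.ts` later in this diff:
```typescript
import fs from 'fs';

// Sketch only: the inline value (e.g. DB_SSL_CA) wins; otherwise fall back to
// reading the file referenced by the *_FILE variant (e.g. DB_SSL_CA_FILE).
function resolveCertSetting(name: string): Buffer | string | undefined {
  const inline = process.env[name];
  if (inline) {
    return inline;
  }
  const filePath = process.env[`${name}_FILE`];
  if (filePath) {
    return fs.readFileSync(filePath);
  }
  return undefined;
}
```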
### Migrating from SQLite to PostgreSQL
1. Set up your PostgreSQL database and configure Jellyseerr to use it
2. Run Jellyseerr to create the tables in the PostgreSQL database
3. Stop Jellyseerr
4. Run the following command to export the data from the SQLite database and import it into the PostgreSQL database:
- Edit the postgres connection string to match your setup
- WARNING: The most recent release of pgloader has an issue quoting the table columns. Use the version in the docker container to avoid this issue.
- "I don't have or don't want to use docker" - You can build the working pgloader version [in this PR](https://github.com/dimitri/pgloader/pull/1531) from source and use the same options as below.
```bash
docker run --rm -v config/db.sqlite3:/db.sqlite3:ro -v pgloader/pgloader.load:/pgloader.load ghcr.io/ralgar/pgloader:pr-1531 pgloader --with "quote identifiers" --with "data only" /db.sqlite3 postgresql://{{DB_USER}}:{{DB_PASS}}@{{DB_HOST}}:{{DB_PORT}}/{{DB_NAME}}
```
5. Start Jellyseerr

View File

@@ -69,6 +69,7 @@
"node-schedule": "2.1.1", "node-schedule": "2.1.1",
"nodemailer": "6.9.1", "nodemailer": "6.9.1",
"openpgp": "5.7.0", "openpgp": "5.7.0",
"pg": "8.11.0",
"plex-api": "5.3.2", "plex-api": "5.3.2",
"pug": "3.0.2", "pug": "3.0.2",
"react": "^18.3.1", "react": "^18.3.1",

128
pnpm-lock.yaml generated
View File

@@ -49,7 +49,7 @@ importers:
         version: 2.11.0
       connect-typeorm:
         specifier: 1.1.4
-        version: 1.1.4(typeorm@0.3.11(sqlite3@5.1.4(encoding@0.1.13))(ts-node@10.9.1(@swc/core@1.6.5(@swc/helpers@0.5.11))(@types/node@20.14.8)(typescript@4.9.5)))
+        version: 1.1.4(typeorm@0.3.11(pg@8.11.0)(sqlite3@5.1.4(encoding@0.1.13))(ts-node@10.9.1(@swc/core@1.6.5(@swc/helpers@0.5.11))(@types/node@20.14.8)(typescript@4.9.5)))
       cookie-parser:
         specifier: 1.4.6
         version: 1.4.6
@@ -119,6 +119,9 @@ importers:
       openpgp:
         specifier: 5.7.0
         version: 5.7.0
+      pg:
+        specifier: 8.11.0
+        version: 8.11.0
       plex-api:
         specifier: 5.3.2
         version: 5.3.2
@@ -193,7 +196,7 @@ importers:
         version: 2.2.5(react@18.3.1)
       typeorm:
         specifier: 0.3.11
-        version: 0.3.11(sqlite3@5.1.4(encoding@0.1.13))(ts-node@10.9.1(@swc/core@1.6.5(@swc/helpers@0.5.11))(@types/node@20.14.8)(typescript@4.9.5))
+        version: 0.3.11(pg@8.11.0)(sqlite3@5.1.4(encoding@0.1.13))(ts-node@10.9.1(@swc/core@1.6.5(@swc/helpers@0.5.11))(@types/node@20.14.8)(typescript@4.9.5))
       undici:
         specifier: ^6.20.1
         version: 6.20.1
@@ -3530,6 +3533,10 @@ packages:
   buffer-from@1.1.2:
     resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==}

+  buffer-writer@2.0.0:
+    resolution: {integrity: sha512-a7ZpuTZU1TRtnwyCNW3I5dc0wWNC3VR9S++Ewyk2HHZdrO3CQJqSpd+95Us590V6AL7JqUAH2IwZ/398PmNFgw==}
+    engines: {node: '>=4'}
+
   buffer@5.7.1:
     resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==}
@@ -7050,6 +7057,9 @@ packages:
     resolution: {integrity: sha512-wpgERjNkLrBiFmkMEjuZJEWKKDrNfHCKA1OhyN1wg1FrLkULbviEy6py1AyJUgZ72YWFbZ38FIpnqvVqAlDUwA==}
     engines: {node: '>=8'}

+  packet-reader@1.0.0:
+    resolution: {integrity: sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ==}
+
   parent-module@1.0.1:
     resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
     engines: {node: '>=6'}
@@ -7141,6 +7151,40 @@ packages:
   performance-now@2.1.0:
     resolution: {integrity: sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==}

+  pg-cloudflare@1.1.1:
+    resolution: {integrity: sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q==}
+
+  pg-connection-string@2.7.0:
+    resolution: {integrity: sha512-PI2W9mv53rXJQEOb8xNR8lH7Hr+EKa6oJa38zsK0S/ky2er16ios1wLKhZyxzD7jUReiWokc9WK5nxSnC7W1TA==}
+
+  pg-int8@1.0.1:
+    resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==}
+    engines: {node: '>=4.0.0'}
+
+  pg-pool@3.7.0:
+    resolution: {integrity: sha512-ZOBQForurqh4zZWjrgSwwAtzJ7QiRX0ovFkZr2klsen3Nm0aoh33Ls0fzfv3imeH/nw/O27cjdz5kzYJfeGp/g==}
+    peerDependencies:
+      pg: '>=8.0'
+
+  pg-protocol@1.7.0:
+    resolution: {integrity: sha512-hTK/mE36i8fDDhgDFjy6xNOG+LCorxLG3WO17tku+ij6sVHXh1jQUJ8hYAnRhNla4QVD2H8er/FOjc/+EgC6yQ==}
+
+  pg-types@2.2.0:
+    resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==}
+    engines: {node: '>=4'}
+
+  pg@8.11.0:
+    resolution: {integrity: sha512-meLUVPn2TWgJyLmy7el3fQQVwft4gU5NGyvV0XbD41iU9Jbg8lCH4zexhIkihDzVHJStlt6r088G6/fWeNjhXA==}
+    engines: {node: '>= 8.0.0'}
+    peerDependencies:
+      pg-native: '>=3.0.1'
+    peerDependenciesMeta:
+      pg-native:
+        optional: true
+
+  pgpass@1.0.5:
+    resolution: {integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==}
+
   picocolors@1.0.1:
     resolution: {integrity: sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==}
@@ -7246,6 +7290,22 @@ packages:
     resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==}
     engines: {node: ^10 || ^12 || >=14}

+  postgres-array@2.0.0:
+    resolution: {integrity: sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==}
+    engines: {node: '>=4'}
+
+  postgres-bytea@1.0.0:
+    resolution: {integrity: sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==}
+    engines: {node: '>=0.10.0'}
+
+  postgres-date@1.0.7:
+    resolution: {integrity: sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==}
+    engines: {node: '>=0.10.0'}
+
+  postgres-interval@1.2.0:
+    resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==}
+    engines: {node: '>=0.10.0'}
+
   prelude-ls@1.2.1:
     resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==}
     engines: {node: '>= 0.8.0'}
@@ -8156,6 +8216,10 @@ packages:
   split2@3.2.2:
     resolution: {integrity: sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg==}

+  split2@4.2.0:
+    resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
+    engines: {node: '>= 10.x'}
+
   split@1.0.1:
     resolution: {integrity: sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==}
@@ -13429,6 +13493,8 @@ snapshots:
   buffer-from@1.1.2: {}

+  buffer-writer@2.0.0: {}
+
   buffer@5.7.1:
     dependencies:
       base64-js: 1.5.1
@@ -13819,13 +13885,13 @@ snapshots:
       ini: 1.3.8
       proto-list: 1.2.4

-  connect-typeorm@1.1.4(typeorm@0.3.11(sqlite3@5.1.4(encoding@0.1.13))(ts-node@10.9.1(@swc/core@1.6.5(@swc/helpers@0.5.11))(@types/node@20.14.8)(typescript@4.9.5))):
+  connect-typeorm@1.1.4(typeorm@0.3.11(pg@8.11.0)(sqlite3@5.1.4(encoding@0.1.13))(ts-node@10.9.1(@swc/core@1.6.5(@swc/helpers@0.5.11))(@types/node@20.14.8)(typescript@4.9.5))):
     dependencies:
       '@types/debug': 0.0.31
       '@types/express-session': 1.17.6
       debug: 4.3.5(supports-color@8.1.1)
       express-session: 1.18.0
-      typeorm: 0.3.11(sqlite3@5.1.4(encoding@0.1.13))(ts-node@10.9.1(@swc/core@1.6.5(@swc/helpers@0.5.11))(@types/node@20.14.8)(typescript@4.9.5))
+      typeorm: 0.3.11(pg@8.11.0)(sqlite3@5.1.4(encoding@0.1.13))(ts-node@10.9.1(@swc/core@1.6.5(@swc/helpers@0.5.11))(@types/node@20.14.8)(typescript@4.9.5))
     transitivePeerDependencies:
       - supports-color
@@ -17578,6 +17644,8 @@ snapshots:
     dependencies:
       p-timeout: 3.2.0

+  packet-reader@1.0.0: {}
+
   parent-module@1.0.1:
     dependencies:
       callsites: 3.1.0
@@ -17656,6 +17724,43 @@ snapshots:
   performance-now@2.1.0: {}

+  pg-cloudflare@1.1.1:
+    optional: true
+
+  pg-connection-string@2.7.0: {}
+
+  pg-int8@1.0.1: {}
+
+  pg-pool@3.7.0(pg@8.11.0):
+    dependencies:
+      pg: 8.11.0
+
+  pg-protocol@1.7.0: {}
+
+  pg-types@2.2.0:
+    dependencies:
+      pg-int8: 1.0.1
+      postgres-array: 2.0.0
+      postgres-bytea: 1.0.0
+      postgres-date: 1.0.7
+      postgres-interval: 1.2.0
+
+  pg@8.11.0:
+    dependencies:
+      buffer-writer: 2.0.0
+      packet-reader: 1.0.0
+      pg-connection-string: 2.7.0
+      pg-pool: 3.7.0(pg@8.11.0)
+      pg-protocol: 1.7.0
+      pg-types: 2.2.0
+      pgpass: 1.0.5
+    optionalDependencies:
+      pg-cloudflare: 1.1.1
+
+  pgpass@1.0.5:
+    dependencies:
+      split2: 4.2.0
+
   picocolors@1.0.1: {}

   picomatch@2.3.1: {}
@@ -17753,6 +17858,16 @@ snapshots:
       picocolors: 1.0.1
       source-map-js: 1.2.0

+  postgres-array@2.0.0: {}
+
+  postgres-bytea@1.0.0: {}
+
+  postgres-date@1.0.7: {}
+
+  postgres-interval@1.2.0:
+    dependencies:
+      xtend: 4.0.2
+
   prelude-ls@1.2.1: {}

   prettier-linter-helpers@1.0.0:
@@ -18882,6 +18997,8 @@ snapshots:
     dependencies:
       readable-stream: 3.6.2

+  split2@4.2.0: {}
+
   split@1.0.1:
     dependencies:
       through: 2.3.8
@@ -19418,7 +19535,7 @@ snapshots:
   typedarray@0.0.6: {}

-  typeorm@0.3.11(sqlite3@5.1.4(encoding@0.1.13))(ts-node@10.9.1(@swc/core@1.6.5(@swc/helpers@0.5.11))(@types/node@20.14.8)(typescript@4.9.5)):
+  typeorm@0.3.11(pg@8.11.0)(sqlite3@5.1.4(encoding@0.1.13))(ts-node@10.9.1(@swc/core@1.6.5(@swc/helpers@0.5.11))(@types/node@20.14.8)(typescript@4.9.5)):
     dependencies:
       '@sqltools/formatter': 1.2.5
       app-root-path: 3.1.0
@@ -19438,6 +19555,7 @@ snapshots:
       xml2js: 0.4.23
       yargs: 17.7.2
     optionalDependencies:
+      pg: 8.11.0
       sqlite3: 5.1.4(encoding@0.1.13)
       ts-node: 10.9.1(@swc/core@1.6.5(@swc/helpers@0.5.11))(@types/node@20.14.8)(typescript@4.9.5)
     transitivePeerDependencies:

View File

@@ -1,7 +1,43 @@
-import 'reflect-metadata';
+import fs from 'fs';
+import type { TlsOptions } from 'tls';
 import type { DataSourceOptions, EntityTarget, Repository } from 'typeorm';
 import { DataSource } from 'typeorm';

+const DB_SSL_PREFIX = 'DB_SSL_';
+
+function boolFromEnv(envVar: string, defaultVal = false) {
+  if (process.env[envVar]) {
+    return process.env[envVar]?.toLowerCase() === 'true';
+  }
+  return defaultVal;
+}
+
+function stringOrReadFileFromEnv(envVar: string): Buffer | string | undefined {
+  if (process.env[envVar]) {
+    return process.env[envVar];
+  }
+  const filePath = process.env[`${envVar}_FILE`];
+  if (filePath) {
+    return fs.readFileSync(filePath);
+  }
+  return undefined;
+}
+
+function buildSslConfig(): TlsOptions | undefined {
+  if (process.env.DB_USE_SSL?.toLowerCase() !== 'true') {
+    return undefined;
+  }
+  return {
+    rejectUnauthorized: boolFromEnv(
+      `${DB_SSL_PREFIX}REJECT_UNAUTHORIZED`,
+      true
+    ),
+    ca: stringOrReadFileFromEnv(`${DB_SSL_PREFIX}CA`),
+    key: stringOrReadFileFromEnv(`${DB_SSL_PREFIX}KEY`),
+    cert: stringOrReadFileFromEnv(`${DB_SSL_PREFIX}CERT`),
+  };
+}
+
 const devConfig: DataSourceOptions = {
   type: 'sqlite',
   database: process.env.CONFIG_DIRECTORY
@@ -9,10 +45,10 @@ const devConfig: DataSourceOptions = {
     : 'config/db/db.sqlite3',
   synchronize: true,
   migrationsRun: false,
-  logging: false,
+  logging: boolFromEnv('DB_LOG_QUERIES'),
   enableWAL: true,
   entities: ['server/entity/**/*.ts'],
-  migrations: ['server/migration/**/*.ts'],
+  migrations: ['server/migration/sqlite/**/*.ts'],
   subscribers: ['server/subscriber/**/*.ts'],
 };
@@ -23,16 +59,56 @@ const prodConfig: DataSourceOptions = {
     : 'config/db/db.sqlite3',
   synchronize: false,
   migrationsRun: false,
-  logging: false,
+  logging: boolFromEnv('DB_LOG_QUERIES'),
   enableWAL: true,
   entities: ['dist/entity/**/*.js'],
-  migrations: ['dist/migration/**/*.js'],
+  migrations: ['dist/migration/sqlite/**/*.js'],
   subscribers: ['dist/subscriber/**/*.js'],
 };

-const dataSource = new DataSource(
-  process.env.NODE_ENV !== 'production' ? devConfig : prodConfig
-);
+const postgresDevConfig: DataSourceOptions = {
+  type: 'postgres',
+  host: process.env.DB_HOST,
+  port: parseInt(process.env.DB_PORT ?? '5432'),
+  username: process.env.DB_USER,
+  password: process.env.DB_PASS,
+  database: process.env.DB_NAME ?? 'jellyseerr',
+  ssl: buildSslConfig(),
+  synchronize: false,
+  migrationsRun: true,
+  logging: boolFromEnv('DB_LOG_QUERIES'),
+  entities: ['server/entity/**/*.ts'],
+  migrations: ['server/migration/postgres/**/*.ts'],
+  subscribers: ['server/subscriber/**/*.ts'],
+};
+
+const postgresProdConfig: DataSourceOptions = {
+  type: 'postgres',
+  host: process.env.DB_HOST,
+  port: parseInt(process.env.DB_PORT ?? '5432'),
+  username: process.env.DB_USER,
+  password: process.env.DB_PASS,
+  database: process.env.DB_NAME ?? 'jellyseerr',
+  ssl: buildSslConfig(),
+  synchronize: false,
+  migrationsRun: false,
+  logging: boolFromEnv('DB_LOG_QUERIES'),
+  entities: ['dist/entity/**/*.js'],
+  migrations: ['dist/migration/postgres/**/*.js'],
+  subscribers: ['dist/subscriber/**/*.js'],
+};
+
+export const isPgsql = process.env.DB_TYPE === 'postgres';
+
+function getDataSource(): DataSourceOptions {
+  if (process.env.NODE_ENV === 'production') {
+    return isPgsql ? postgresProdConfig : prodConfig;
+  } else {
+    return isPgsql ? postgresDevConfig : devConfig;
+  }
+}
+
+const dataSource = new DataSource(getDataSource());

 export const getRepository = <Entity extends object>(
   target: EntityTarget<Entity>
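With the selection above, the rest of the codebase keeps calling the same `getRepository` helper no matter which backend is active; only a handful of call sites need `isPgsql`. A hedged usage sketch (the entity import and the call site are illustrative, not part of this diff):

```typescript
import { getRepository, isPgsql } from '@server/datasource';
import Media from '@server/entity/Media';

// The repository API is identical for SQLite and PostgreSQL, so ordinary data
// access does not need to branch on the backend.
async function countMedia(): Promise<number> {
  const mediaRepository = getRepository(Media);
  return mediaRepository.count();
}

// isPgsql is exported for the few places that must special-case PostgreSQL,
// e.g. skipping the SQLite PRAGMA statements in server/index.ts below.
void countMedia().then((total) => console.log(`media rows: ${total}`, { isPgsql }));
```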

View File

@@ -10,6 +10,7 @@ import type { DownloadingItem } from '@server/lib/downloadtracker';
 import downloadTracker from '@server/lib/downloadtracker';
 import { getSettings } from '@server/lib/settings';
 import logger from '@server/logger';
+import { DbAwareColumn } from '@server/utils/DbColumnHelper';
 import { getHostname } from '@server/utils/getHostname';
 import {
   AfterLoad,
@@ -42,6 +43,10 @@ class Media {
       finalIds = tmdbIds;
     }
+
+    if (finalIds.length === 0) {
+      return [];
+    }

     const media = await mediaRepository
       .createQueryBuilder('media')
       .leftJoinAndSelect(
@@ -127,10 +132,23 @@ class Media {
   @UpdateDateColumn()
   public updatedAt: Date;

-  @Column({ type: 'datetime', default: () => 'CURRENT_TIMESTAMP' })
+  /**
+   * The `lastSeasonChange` column stores the date and time when the media was added to the library.
+   * It needs to be database-aware because SQLite supports `datetime` while PostgreSQL supports `timestamp with timezone (timestampz)`.
+   */
+  @DbAwareColumn({ type: 'datetime', default: () => 'CURRENT_TIMESTAMP' })
   public lastSeasonChange: Date;

-  @Column({ type: 'datetime', nullable: true })
+  /**
+   * The `mediaAddedAt` column stores the date and time when the media was added to the library.
+   * It needs to be database-aware because SQLite supports `datetime` while PostgreSQL supports `timestamp with timezone (timestampz)`.
+   * This column is nullable because it can be null when the media is not yet synced to the library.
+   */
+  @DbAwareColumn({
+    type: 'datetime',
+    default: () => 'CURRENT_TIMESTAMP',
+    nullable: true,
+  })
   public mediaAddedAt: Date;

   @Column({ nullable: true, type: 'int' })
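The `finalIds.length === 0` guard added above keeps TypeORM from expanding an `IN (:...ids)` parameter with an empty list, which yields invalid SQL on PostgreSQL (this is the `fix tiny psql error when selecting by empty list` commit in the list at the top). A hedged sketch of the same pattern; the query shape and entity import are illustrative:

```typescript
import { getRepository } from '@server/datasource';
import Media from '@server/entity/Media';

// Sketch only: bail out before building a query whose IN (...) list would be empty.
async function findByTmdbIds(tmdbIds: number[]): Promise<Media[]> {
  if (tmdbIds.length === 0) {
    return [];
  }
  return getRepository(Media)
    .createQueryBuilder('media')
    .where('media.tmdbId IN (:...tmdbIds)', { tmdbIds })
    .getMany();
}
```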

View File

@@ -385,6 +385,7 @@ export class MediaRequest {
   @ManyToOne(() => Media, (media) => media.requests, {
     eager: true,
     onDelete: 'CASCADE',
+    nullable: false,
   })
   public media: Media;
@@ -857,7 +858,7 @@ export class MediaRequest {
       const requestRepository = getRepository(MediaRequest);

       this.status = MediaRequestStatus.FAILED;
-      requestRepository.save(this);
+      await requestRepository.save(this);

       logger.warn(
         'Something went wrong sending movie request to Radarr, marking status as FAILED',
@@ -1132,13 +1133,14 @@ export class MediaRequest {
         media[this.is4k ? 'externalServiceSlug4k' : 'externalServiceSlug'] =
           sonarrSeries.titleSlug;
         media[this.is4k ? 'serviceId4k' : 'serviceId'] = sonarrSettings?.id;

         await mediaRepository.save(media);
       })
       .catch(async () => {
         const requestRepository = getRepository(MediaRequest);

         this.status = MediaRequestStatus.FAILED;
-        requestRepository.save(this);
+        await requestRepository.save(this);

         logger.warn(
           'Something went wrong sending series request to Sonarr, marking status as FAILED',

View File

@@ -23,7 +23,10 @@ class Season {
   @Column({ type: 'int', default: MediaStatus.UNKNOWN })
   public status4k: MediaStatus;

-  @ManyToOne(() => Media, (media) => media.seasons, { onDelete: 'CASCADE' })
+  @ManyToOne(() => Media, (media) => media.seasons, {
+    onDelete: 'CASCADE',
+    nullable: false,
+  })
   public media: Promise<Media>;

   @CreateDateColumn()

View File

@@ -53,6 +53,7 @@ export class Watchlist implements WatchlistItem {
   @ManyToOne(() => Media, (media) => media.watchlists, {
     eager: true,
     onDelete: 'CASCADE',
+    nullable: false,
   })
   public media: Media;

View File

@@ -1,5 +1,5 @@
 import PlexAPI from '@server/api/plexapi';
-import dataSource, { getRepository } from '@server/datasource';
+import dataSource, { getRepository, isPgsql } from '@server/datasource';
 import DiscoverSlider from '@server/entity/DiscoverSlider';
 import { Session } from '@server/entity/Session';
 import { User } from '@server/entity/User';
@@ -66,9 +66,13 @@ app
     // Run migrations in production
     if (process.env.NODE_ENV === 'production') {
-      await dbConnection.query('PRAGMA foreign_keys=OFF');
-      await dbConnection.runMigrations();
-      await dbConnection.query('PRAGMA foreign_keys=ON');
+      if (isPgsql) {
+        await dbConnection.runMigrations();
+      } else {
+        await dbConnection.query('PRAGMA foreign_keys=OFF');
+        await dbConnection.runMigrations();
+        await dbConnection.query('PRAGMA foreign_keys=ON');
+      }
     }

     // Load Settings

View File

@@ -0,0 +1,304 @@
import type { MigrationInterface, QueryRunner } from 'typeorm';
export class InitialMigration1705599190375 implements MigrationInterface {
name = 'InitialMigration1705599190375';
public async up(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(
`create table if not exists session
(
"expiredAt" bigint,
id text,
json text
);`
);
await queryRunner.query(
`create index if not exists "idx_194703_IDX_28c5d1d16da7908c97c9bc2f74"
on session ("expiredAt");`
);
await queryRunner.query(
`create unique index if not exists idx_194703_sqlite_autoindex_session_1
on session (id);`
);
await queryRunner.query(
`create table if not exists media
(
id serial,
"mediaType" text,
"tmdbId" int,
"tvdbId" int,
"imdbId" text,
status int default '1'::int,
status4k int default '1'::int,
"createdAt" timestamp with time zone default CURRENT_TIMESTAMP,
"updatedAt" timestamp with time zone default CURRENT_TIMESTAMP,
"lastSeasonChange" timestamp with time zone default CURRENT_TIMESTAMP,
"mediaAddedAt" timestamp with time zone,
"serviceId" int,
"serviceId4k" int,
"externalServiceId" int,
"externalServiceId4k" int,
"externalServiceSlug" text,
"externalServiceSlug4k" text,
"ratingKey" text,
"ratingKey4k" text,
"jellyfinMediaId" text,
"jellyfinMediaId4k" text,
constraint idx_194722_media_pkey
primary key (id)
);`
);
await queryRunner.query(
`create table if not exists season
(
id serial,
"seasonNumber" int,
status int default '1'::int,
"createdAt" timestamp with time zone default CURRENT_TIMESTAMP,
"updatedAt" timestamp with time zone default CURRENT_TIMESTAMP,
"mediaId" int not null,
status4k int default '1'::int,
constraint idx_194715_season_pkey
primary key (id),
foreign key ("mediaId") references media
on delete cascade
);`
);
await queryRunner.query(
`create index if not exists "idx_194722_IDX_7ff2d11f6a83cb52386eaebe74"
on media ("imdbId");`
);
await queryRunner.query(
`create index if not exists "idx_194722_IDX_41a289eb1fa489c1bc6f38d9c3"
on media ("tvdbId");`
);
await queryRunner.query(
`create index if not exists "idx_194722_IDX_7157aad07c73f6a6ae3bbd5ef5"
on media ("tmdbId");`
);
await queryRunner.query(
`create unique index if not exists idx_194722_sqlite_autoindex_media_1
on media ("tvdbId");`
);
await queryRunner.query(
`create table if not exists "user"
(
id serial,
email text,
username text,
"plexId" int,
"plexToken" text,
permissions int default '0'::int,
avatar text,
"createdAt" timestamp with time zone default CURRENT_TIMESTAMP,
"updatedAt" timestamp with time zone default CURRENT_TIMESTAMP,
password text,
"userType" int default '1'::int,
"plexUsername" text,
"resetPasswordGuid" text,
"recoveryLinkExpirationDate" date,
"movieQuotaLimit" int,
"movieQuotaDays" int,
"tvQuotaLimit" int,
"tvQuotaDays" int,
"jellyfinUsername" text,
"jellyfinAuthToken" text,
"jellyfinUserId" text,
"jellyfinDeviceId" text,
constraint idx_194731_user_pkey
primary key (id)
);`
);
await queryRunner.query(
`create unique index if not exists idx_194731_sqlite_autoindex_user_1
on "user" (email);`
);
await queryRunner.query(
`create table if not exists user_push_subscription
(
id serial,
endpoint text,
p256dh text,
auth text,
"userId" int,
constraint idx_194740_user_push_subscription_pkey
primary key (id),
foreign key ("userId") references "user"
on delete cascade
);`
);
await queryRunner.query(
`create unique index if not exists idx_194740_sqlite_autoindex_user_push_subscription_1
on user_push_subscription (auth);`
);
await queryRunner.query(
`create table if not exists issue
(
id serial,
"issueType" int,
status int default '1'::int,
"problemSeason" int default '0'::int,
"problemEpisode" int default '0'::int,
"createdAt" timestamp with time zone default CURRENT_TIMESTAMP,
"updatedAt" timestamp with time zone default CURRENT_TIMESTAMP,
"mediaId" int not null,
"createdById" int,
"modifiedById" int,
constraint idx_194747_issue_pkey
primary key (id),
foreign key ("modifiedById") references "user"
on delete cascade,
foreign key ("createdById") references "user"
on delete cascade,
foreign key ("mediaId") references media
on delete cascade
);`
);
await queryRunner.query(
`create table if not exists issue_comment
(
id serial,
message text,
"createdAt" timestamp with time zone default CURRENT_TIMESTAMP,
"updatedAt" timestamp with time zone default CURRENT_TIMESTAMP,
"userId" int,
"issueId" int,
constraint idx_194755_issue_comment_pkey
primary key (id),
foreign key ("issueId") references issue
on delete cascade,
foreign key ("userId") references "user"
on delete cascade
);`
);
await queryRunner.query(
`create table if not exists user_settings
(
id serial,
"notificationTypes" text,
"discordId" text,
"userId" int,
region text,
"originalLanguage" text,
"telegramChatId" text,
"telegramSendSilently" boolean,
"pgpKey" text,
locale text default ''::text,
"pushbulletAccessToken" text,
"pushoverApplicationToken" text,
"pushoverUserKey" text,
"watchlistSyncMovies" boolean,
"watchlistSyncTv" boolean,
"pushoverSound" varchar,
constraint idx_194762_user_settings_pkey
primary key (id),
foreign key ("userId") references "user"
on delete cascade
);`
);
await queryRunner.query(
`create unique index if not exists idx_194762_sqlite_autoindex_user_settings_1
on user_settings ("userId");`
);
await queryRunner.query(
`create table if not exists media_request
(
id serial,
status int,
"createdAt" timestamp with time zone default CURRENT_TIMESTAMP,
"updatedAt" timestamp with time zone default CURRENT_TIMESTAMP,
type text,
"mediaId" int not null,
"requestedById" int,
"modifiedById" int,
is4k boolean default false,
"serverId" int,
"profileId" int,
"rootFolder" text,
"languageProfileId" int,
tags text,
"isAutoRequest" boolean default false,
constraint idx_194770_media_request_pkey
primary key (id),
foreign key ("modifiedById") references "user"
on delete set null,
foreign key ("requestedById") references "user"
on delete cascade,
foreign key ("mediaId") references media
on delete cascade
);`
);
await queryRunner.query(
`create table if not exists season_request
(
id serial NOT NULL,
"seasonNumber" int,
status int default '1'::int,
"createdAt" timestamp with time zone default now(),
"updatedAt" timestamp with time zone default now(),
"requestId" int,
constraint idx_194709_season_request_pkey
primary key (id),
foreign key ("requestId") references media_request
on delete cascade
);`
);
await queryRunner.query(
`create table if not exists discover_slider
(
id serial,
type integer,
"order" integer,
"isBuiltIn" boolean default false,
enabled boolean default true,
title text,
data text,
"createdAt" timestamp with time zone default CURRENT_TIMESTAMP,
"updatedAt" timestamp with time zone default CURRENT_TIMESTAMP,
constraint idx_194779_discover_slider_pkey
primary key (id)
);`
);
await queryRunner.query(
`create table if not exists watchlist
(
id serial,
"ratingKey" text,
"mediaType" text,
title text,
"tmdbId" int,
"createdAt" timestamp with time zone default CURRENT_TIMESTAMP,
"updatedAt" timestamp with time zone default CURRENT_TIMESTAMP,
"requestedById" int,
"mediaId" int not null,
constraint idx_194788_watchlist_pkey
primary key (id)
);`
);
await queryRunner.query(
`create index if not exists "idx_194788_IDX_939f205946256cc0d2a1ac51a8"
on watchlist ("tmdbId");`
);
await queryRunner.query(
`create unique index if not exists idx_194788_sqlite_autoindex_watchlist_1
on watchlist ("tmdbId", "requestedById");`
);
}
public async down(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`drop table if exists session cascade`);
await queryRunner.query(`drop table if exists season_request cascade`);
await queryRunner.query(`drop table if exists season cascade`);
await queryRunner.query(
`drop table if exists user_push_subscription cascade`
);
await queryRunner.query(`drop table if exists issue_comment cascade`);
await queryRunner.query(`drop table if exists issue cascade`);
await queryRunner.query(`drop table if exists user_settings cascade`);
await queryRunner.query(`drop table if exists media_request cascade`);
await queryRunner.query(`drop table if exists media cascade`);
await queryRunner.query(`drop table if exists "user" cascade`);
await queryRunner.query(`drop table if exists discover_slider cascade`);
await queryRunner.query(`drop table if exists watchlist cascade`);
}
}

View File

@@ -0,0 +1,32 @@
import type { MigrationInterface, QueryRunner } from 'typeorm';
export class AddBlacklist1730770837441 implements MigrationInterface {
name = 'AddBlacklist1730770837441';
public async up(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(
`CREATE TABLE "blacklist"
(
"id" SERIAL PRIMARY KEY,
"mediaType" VARCHAR NOT NULL,
"title" VARCHAR,
"tmdbId" INTEGER NOT NULL,
"createdAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
"userId" INTEGER,
"mediaId" INTEGER,
CONSTRAINT "UQ_6bbafa28411e6046421991ea21c" UNIQUE ("tmdbId", "userId")
)`
);
await queryRunner.query(
`CREATE INDEX "IDX_6bbafa28411e6046421991ea21" ON "blacklist" ("tmdbId")`
);
}
public async down(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(
`DROP INDEX IF EXISTS "IDX_6bbafa28411e6046421991ea21"`
);
await queryRunner.query(`DROP TABLE IF EXISTS "blacklist"`);
}
}
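These PostgreSQL migrations live under `server/migration/postgres/**` (compiled to `dist/migration/postgres/**`) and are picked up by the data source configured earlier in this diff. A hedged sketch of applying or reverting them programmatically with standard TypeORM calls; the script name and structure are illustrative:

```typescript
import dataSource from '@server/datasource';

// Sketch only: apply all pending migrations, or roll back the most recent one.
async function migrate(direction: 'up' | 'down'): Promise<void> {
  const connection = await dataSource.initialize();
  try {
    if (direction === 'up') {
      await connection.runMigrations();
    } else {
      await connection.undoLastMigration();
    }
  } finally {
    await connection.destroy();
  }
}

void migrate('up');
```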

View File

@@ -113,7 +113,7 @@ requestRoutes.get<Record<string, unknown>, RequestResultsResponse>(
         requestStatus: statusFilter,
       })
       .andWhere(
-        '((request.is4k = 0 AND media.status IN (:...mediaStatus)) OR (request.is4k = 1 AND media.status4k IN (:...mediaStatus)))',
+        '((request.is4k = false AND media.status IN (:...mediaStatus)) OR (request.is4k = true AND media.status4k IN (:...mediaStatus)))',
         {
           mediaStatus: mediaStatusFilter,
         }
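The literal swap above is needed because `media_request.is4k` is a real `boolean` column in PostgreSQL, which does not compare against the integer literals SQLite accepted. Where the query allows it, letting TypeORM bind the value keeps the SQL backend-agnostic; a hedged sketch (the function name is illustrative):

```typescript
import { getRepository } from '@server/datasource';
import { MediaRequest } from '@server/entity/MediaRequest';

// Sketch only: find options let TypeORM render the boolean for whichever
// driver is configured, instead of embedding 0/1 or true/false in raw SQL.
async function standardRequests(): Promise<MediaRequest[]> {
  return getRepository(MediaRequest).find({ where: { is4k: false } });
}

void standardRequests();
```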

View File

@@ -45,7 +45,7 @@ router.get('/', async (req, res, next) => {
       `CASE WHEN (user.username IS NULL OR user.username = '') THEN (
       CASE WHEN (user.plexUsername IS NULL OR user.plexUsername = '') THEN (
       CASE WHEN (user.jellyfinUsername IS NULL OR user.jellyfinUsername = '') THEN
-      user.email
+      "user"."email"
       ELSE
       LOWER(user.jellyfinUsername)
       END)

View File

@@ -0,0 +1,20 @@
import { isPgsql } from '@server/datasource';
import type { ColumnOptions, ColumnType } from 'typeorm';
import { Column } from 'typeorm';

const pgTypeMapping: { [key: string]: ColumnType } = {
  datetime: 'timestamp with time zone',
};

export function resolveDbType(pgType: ColumnType): ColumnType {
  if (isPgsql && pgType.toString() in pgTypeMapping) {
    return pgTypeMapping[pgType.toString()];
  }
  return pgType;
}

export function DbAwareColumn(columnOptions: ColumnOptions) {
  if (columnOptions.type) {
    columnOptions.type = resolveDbType(columnOptions.type);
  }
  return Column(columnOptions);
}
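For reference, this is how the helper is consumed by the entities earlier in this diff; a minimal sketch with a made-up entity reduced to the relevant column:

```typescript
import { DbAwareColumn } from '@server/utils/DbColumnHelper';
import { Entity, PrimaryGeneratedColumn } from 'typeorm';

@Entity()
class Example {
  @PrimaryGeneratedColumn()
  public id: number;

  // 'datetime' is kept as-is for SQLite and mapped to
  // 'timestamp with time zone' when DB_TYPE is 'postgres'.
  @DbAwareColumn({ type: 'datetime', default: () => 'CURRENT_TIMESTAMP' })
  public addedAt: Date;
}

export default Example;
```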