190 Commits

Author SHA1 Message Date
Peter Wood
babf61b697 added TS_AUTHKEY environment variable 2025-12-01 04:17:53 -08:00
Peter Wood
75d48204ba removed docmost 2025-11-30 16:06:48 -08:00
Peter Wood
255b977aad Add restore script for Gitea, DB, and Runner data 2025-11-30 04:52:13 -08:00
Peter Wood
ce5f3af384 network_mode usage is discontinued in favor of network 2025-11-11 12:12:37 -05:00
Peter Wood
9f6d4744d8 Merge branch 'master' of https://github.com/acedanger/docker 2025-11-10 21:34:54 -05:00
Peter Wood
b3632c0333 Add runner configuration and update docker-compose for Gitea runner 2025-11-10 21:34:49 -05:00
Peter Wood
f9073a07a5 Update .gitignore, README, and Traefik configuration; add new services and environment variables 2025-11-10 19:24:57 -05:00
Peter Wood
b3ee10a119 Merge branch 'master' of https://github.com/acedanger/docker 2025-11-10 06:38:04 -05:00
Peter Wood
6b3089135f Update Chrome and Meilisearch images in docker-compose 2025-11-10 06:28:33 -05:00
Peter Wood
f4ab567706 Add healthcheck and labels to Gitea services in docker-compose; add runner for CICD 2025-11-10 06:27:39 -05:00
Peter Wood
1d1106ff8e Merge branch 'master' of github.com:acedanger/docker 2025-11-10 10:08:32 +00:00
Peter Wood
47e96f24ba add jellystat and jellystat-db services to docker-compose; include updater period environment variable 2025-11-10 10:08:26 +00:00
Peter Wood
dbff56cbac Merge pull request #2 from acedanger/copilot/add-gluetun-dependency-to-sabnzbd
Add gluetun service_healthy dependency to sabnzbd
2025-11-09 22:18:38 -05:00
copilot-swe-agent[bot]
5cbc976c87 Add gluetun service_healthy dependency to sabnzbd container
Co-authored-by: acedanger <1345540+acedanger@users.noreply.github.com>
2025-11-10 03:11:16 +00:00
copilot-swe-agent[bot]
5c15413c2c Initial plan 2025-11-10 03:07:16 +00:00
Peter Wood
99ea6366d3 update badger plugin version to v1.2.0 and add tcp entry for port 5432 2025-09-29 17:05:03 -04:00
Peter Wood
8f21967c25 added skip counting by 2 2025-09-29 17:01:31 -04:00
Peter Wood
77989ad88b auto https was causing an infinite reload with pangolin the functionality 2025-09-29 17:00:46 -04:00
Peter Wood
e5e51662e6 add technical analysis script with multiple indicators and signal identification 2025-09-28 20:33:15 -04:00
Peter Wood
724b0766cb add initial Caddyfile, docker-compose.yml, and HTML files for math flashcards; set up basic structure and styles 2025-09-28 20:32:32 -04:00
Peter Wood
d4c91ffcc2 update images for pangolin and gerbil services; refine comments in docker-compose.yml 2025-09-27 13:03:16 -04:00
Peter Wood
d707ec563d MiningWood homepage 2025-09-27 13:01:57 -04:00
Peter Wood
505003eb74 ignoring the documents added to papra 2025-08-09 02:03:03 +00:00
Peter Wood
ac41d288b9 updated upload path, added dependencies to other containers in the compose file 2025-08-05 07:30:11 -04:00
Peter Wood
7e282f1e29 added ACCEPT_CLIENTS=true env variable 2025-07-30 19:35:32 -04:00
Peter Wood
ee0a6c09dc Merge branch 'master' of github.com:acedanger/docker 2025-07-01 15:03:10 -04:00
Peter Wood
06870eb5e2 apprise config 2025-07-01 15:02:46 -04:00
Peter Wood
882461225e replaced with pangolin on vps 2025-06-21 11:46:19 -04:00
Peter Wood
df76becf48 removed tailsacle subnet router
fuck that noise. I can't figure out how to make it work and allow me to
access devices on my local network. that was supposed to be the point of
it! anyway, skill issue I'm sure but I gave up.
2025-06-21 11:45:15 -04:00
Peter Wood
153c4d6b62 Update Tailscale service configuration: modify TS_EXTRA_ARGS to advertise a broader route range. 2025-06-18 15:44:55 -04:00
Peter Wood
ea4b41ddba Refactor Docker Compose configuration for authentik services: improve healthcheck syntax, add labels, and standardize image tags. 2025-06-17 09:20:58 -04:00
Peter Wood
fe0e4a3056 Add Docker Compose configuration for tclip service 2025-06-17 09:20:39 -04:00
Peter Wood
cbbb5003b0 Add Docker Compose configuration for beszel and beszel-agent services 2025-06-17 09:20:15 -04:00
Peter Wood
ac9d3b719b Merge branch 'master' of https://github.com/acedanger/docker 2025-06-17 09:17:43 -04:00
Peter Wood
6fbd67bad1 Update links.json: standardize owner email, add new links, and modify existing entries 2025-06-17 09:16:50 -04:00
Peter Wood
42047986d8 backup gitea docker files 2025-06-15 20:44:59 -04:00
Peter Wood
bb572d0b14 added jellyfin 2025-06-15 20:44:39 -04:00
Peter Wood
a36d7a29e2 added hostname 2025-06-15 20:42:38 -04:00
Peter Wood
35c87c80c1 saving my changes before the server wipe 2025-06-12 10:10:18 -04:00
Peter Wood
62b02ca8a2 fix: update volume names in docker-compose.yml to use hoarder_data and hoarder_meilisearch 2025-06-11 10:32:21 -04:00
Peter Wood
f853b7c08b fix: update image version variable from HOARDER_VERSION to KARAKEEP_VERSION in docker-compose.yml 2025-06-11 07:39:53 -04:00
Peter Wood
be11063ee9 karakeep service name changed 2025-06-10 21:38:13 -04:00
Peter Wood
310175eee5 gitea compose 2025-06-10 21:37:51 -04:00
Peter Wood
c6e63777c8 renamed hoarder to karakeep 2025-06-10 21:37:37 -04:00
Peter Wood
5cf4409401 Merge branch 'master' of github.com:acedanger/docker 2025-06-01 13:10:53 +00:00
Peter Wood
2a3e521b90 Add Huntarr service to Docker Compose configuration 2025-06-01 13:10:49 +00:00
Peter Wood
8245dfbe51 Add pangolin configuration to .gitignore and update Docker Compose for gitea port 2025-05-28 20:47:00 -04:00
Peter Wood
966250046f Refactor Docker Compose configuration for wiki service and clean up db-data volume definition 2025-05-28 15:39:30 -04:00
Peter Wood
e409bffbe5 Add Docker Compose and Traefik configuration for Pangolin stack 2025-05-28 15:39:11 -04:00
Peter Wood
f6dc32d427 Add scripts for importing GitHub Gists into OpenGist with different authentication methods 2025-05-28 18:30:50 +00:00
Peter Wood
6f02bcb8b9 Add opengist database to .gitignore 2025-05-28 17:01:35 +00:00
Peter Wood
342197e0c9 Fix service dependency name for postgres in Docker Compose configuration 2025-05-28 16:26:56 +00:00
Peter Wood
78196228bc Remove Immich Postgres database backup script 2025-05-27 23:19:28 +00:00
Peter Wood
c6c642d697 Merge branch 'master' of github.com:acedanger/docker 2025-05-27 23:18:58 +00:00
Peter Wood
e750a679d5 Add Docker Compose configurations for newt and opengist services 2025-05-27 22:25:45 +00:00
Peter Wood
5c3b2f0001 Add new Docker Compose configuration for the Newt service 2025-05-27 17:48:06 -04:00
Peter Wood
e1b97ae183 Remove Immich Postgres database backup script 2025-05-27 17:47:36 -04:00
Peter Wood
fc8057284c Merge branch 'master' of github.com:acedanger/docker 2025-05-27 21:32:14 +00:00
Peter Wood
ac476cf408 Add Immich Postgres database backup script and update cocker compose config based on (https://github.com/immich-app/immich/discussions/18429) 2025-05-22 13:15:16 -04:00
Peter Wood
6372b625da Update Caddyfile to replace hostname references with IP addresses and remove obsolete Docker Compose file for memos service 2025-05-22 12:04:37 -04:00
Peter Wood
970db59c91 Fix formatting in docker-compose.yml by adding a newline before volumes section 2025-05-22 00:55:16 +00:00
Peter Wood
77267ff7fa Add SMTP configuration for email notifications in settings.yml 2025-05-22 00:55:00 +00:00
Peter Wood
f58974fec0 Update environment configuration to include google gemini settings 2025-05-17 19:48:10 -04:00
Peter Wood
f15eb4da67 Remove obsolete configuration files and example setups for homepage and speedtest services 2025-05-06 15:09:23 -04:00
Peter Wood
67508b563c Enhance Caddy configuration with global error logging and update reverse proxy settings 2025-05-06 15:09:08 -04:00
Peter Wood
8efe111496 Remove deprecated Gitea setup files and example configurations 2025-04-30 14:03:42 -04:00
Peter Wood
18e4149929 Add Gitea setup documentation, environment configuration, and Docker Compose files 2025-04-30 12:54:39 -04:00
Peter Wood
d163696a3e Add example environment files for various services including PostgreSQL, Telegram, and WireGuard configurations 2025-04-29 17:05:56 -04:00
Peter Wood
734acd2ed4 Add initial Docker Compose configuration and database setup for Dockge 2025-04-29 16:43:52 -04:00
Peter Wood
06209f6583 Add Docker Compose configuration for n8n service and example environment file 2025-04-29 16:17:53 -04:00
Peter Wood
994715cbf9 Add Docker Compose configurations for Authentik and Gatus services; create example environment files for both services. 2025-04-29 16:03:58 -04:00
Peter Wood
ce9a26d2a4 Refactor services.yaml to remove Plex entry and add Jellyseerr configuration; update settings.yml for boolean values and enhance comments for clarity. 2025-04-29 19:51:33 +00:00
Peter Wood
efee0d8ac9 Update .gitignore to remove memos database, fix README.md environment file path, and add docker-compose for omni-tools service 2025-04-29 19:50:16 +00:00
Peter Wood
827ef790d8 Add Docker Compose configurations for Gitea and Plex services 2025-04-29 15:46:10 -04:00
Peter Wood
3fe462b1e6 Add .env.example file and update docker-compose.yml to use HOSTNAME variable 2025-04-29 15:46:10 -04:00
Peter Wood
f4894f860a Merge branch 'master' of https://github.com/acedanger/docker 2025-04-29 15:24:49 -04:00
Peter Wood
df3b673ddd updated export 2025-04-29 15:23:45 -04:00
Peter Wood
c8f57a1cd7 Add Docker Compose configuration for docmost service with PostgreSQL and Redis 2025-04-29 14:35:01 -04:00
Peter Wood
6ee74cfe15 Remove mealie service from docker-compose.yml and add memos_prod.db to .gitignore 2025-04-29 14:34:42 -04:00
Peter Wood
85979c3701 Add Cloudflare DNS support to Caddy configuration
- Updated .env.example to include CF_ZONE_READ and CF_DNS_EDIT variables.
- Enhanced Caddyfile to utilize Cloudflare DNS for TLS.
- Created Dockerfile for building Caddy with Cloudflare DNS support.
- Modified docker-compose.yml to use the new caddy-cloudflare image and set environment variables.
2025-04-29 14:34:25 -04:00
Peter Wood
5f9bfe5b8c added example env file 2025-04-25 12:48:30 -04:00
Peter Wood
894ee83810 updated image name 2025-04-21 11:49:39 -04:00
Peter Wood
06325f35be Merge branch 'master' of github.com:acedanger/docker 2025-03-29 19:05:25 +00:00
Peter Wood
964a0e1f59 remove unused networks section from docker-compose.yml 2025-03-29 19:04:35 +00:00
Peter Wood
995de61997 add default watch setting for Docker provider in diun compose file 2025-03-29 19:02:55 +00:00
Peter Wood
1fe01ec141 corrected timezones env variable 2025-03-29 18:58:41 +00:00
Peter Wood
cc27fc070b added jellyseer 2025-03-29 18:57:30 +00:00
Peter Wood
dd3adbcb91 corrected remote hosts 2025-03-29 18:57:02 +00:00
Peter Wood
deb8380636 update .gitignore to include additional directories for exclusion 2025-03-05 10:19:49 -05:00
Peter Wood
b2e8f084e7 refactor docker-compose.yml for improved readability and formatting 2025-03-05 10:19:41 -05:00
Peter Wood
69d8f43e7f Merge branch 'master' of github.com:acedanger/docker 2025-03-05 09:48:50 -05:00
Peter Wood
271b308b63 update dozzle service configuration in docker-compose for enhanced authentication and actions support 2025-03-05 09:45:51 -05:00
Peter Wood
fde90b6722 update Caddyfile reverse proxy and add extra_hosts to docker-compose for improved connectivity 2025-03-05 09:44:55 -05:00
Peter Wood
6611cd2dee add restart policy to media service in docker-compose.yml 2025-03-05 14:30:46 +00:00
Peter Wood
2c370db732 corrected email in Caddyfil 2025-02-22 21:32:03 -05:00
Peter Wood
36983ee53e add filebrowser database files to .gitignore 2025-02-22 21:30:24 -05:00
Peter Wood
74eae0169e add filebrowser service configuration to docker-compose 2025-02-22 21:28:47 -05:00
Peter Wood
6eb27e52e9 add Caddy configuration and update docker-compose for improved service management 2025-02-22 21:28:09 -05:00
Peter Wood
2b0b730a36 better ignorance of caddy 2025-02-21 19:07:56 -05:00
Peter Wood
dbe9ff8969 cleanup 2025-02-21 19:04:40 -05:00
Peter Wood
d0276fbf09 added immich finally 2025-02-21 19:04:28 -05:00
Peter Wood
44ff38a765 update .gitignore and add diun service labels in multiple docker-compose files 2025-02-13 18:10:36 +00:00
Peter Wood
8b4b5aa78a update .gitignore for stirling and add diun service configuration in compose.yaml 2025-02-12 14:01:40 +00:00
Peter Wood
681b1eb0b0 update search provider to Google, enhance .gitignore for stirling, and add custom settings for security configuration 2025-02-12 13:30:45 +00:00
Peter Wood
324e7b2528 enable security features and update environment variables in docker-compose.yml 2025-02-12 13:19:44 +00:00
Peter Wood
14c0be9244 removed cloudflare network 2025-02-12 13:18:32 +00:00
Peter Wood
602efb113d replaced protonvpn with airvpn 2025-01-25 23:13:13 +00:00
Peter Wood
e77530f67a added metube to download one-off videos 2025-01-22 10:53:37 +00:00
Peter Wood
2b449bd3ed bookmarks 2025-01-19 09:27:53 -05:00
Peter Wood
6d1d667171 adguard home 2025-01-19 09:27:41 -05:00
Peter Wood
99f1db63e3 added stirling-pdf; go/pdf to use when connected to tailscale 2024-12-29 14:19:53 +00:00
Peter Wood
b9bdcd5111 added audiobookshelf 2024-12-28 01:58:56 +00:00
Peter Wood
fb08c89e8d ignorning audiobookshelf metadata 2024-12-28 01:58:34 +00:00
Peter Wood
cd669f1b80 corrected timezone environment variable 2024-12-28 00:19:15 +00:00
Peter Wood
20ce6a6677 corrected yaml syntax for user; reference server.yml in this folder 2024-12-17 11:41:27 -05:00
Peter Wood
86f4202edf removed erroneous character 2024-12-17 11:38:54 -05:00
Peter Wood
af808dabd8 Merge branch 'master' of github.com:acedanger/docker 2024-12-17 16:33:19 +00:00
Peter Wood
f2296af064 added bunches of things 2024-12-17 16:33:16 +00:00
Peter Wood
047e745ac8 removed cloudflare network and specified the volume was created external to the docker compose command 2024-12-11 08:44:14 -05:00
Peter Wood
c9aa0fe616 removed errant character 2024-12-08 12:56:26 -05:00
Peter Wood
a3c1b38d6a added some example .env files to show what values are necessary 2024-12-08 12:53:37 -05:00
Peter Wood
62a91f667d clean upsome compose files 2024-12-08 12:52:48 -05:00
Peter Wood
a2650acb8e removed comments 2024-12-08 12:48:27 -05:00
Peter Wood
c5b691614c removed healthcheck 2024-12-08 12:47:26 -05:00
Peter Wood
ce113a2f22 updated volume 2024-12-06 19:03:36 -05:00
Peter Wood
d79ae20aed removed version since it's obsolete and removed external cloudflare network 2024-12-02 21:15:11 -05:00
Peter Wood
f3ff03126f utilizing a .env file 2024-11-21 12:04:25 +00:00
Peter Wood
4b6968215b added caddy webserver 2024-11-20 15:45:18 +00:00
Peter Wood
a22b6d68b1 homepage 2024-11-20 13:55:25 +00:00
Peter Wood
3189730bb4 cleaned up compose file 2024-11-20 13:55:03 +00:00
Peter Wood
8b90b6b88b history of speedtest 2024-11-20 13:54:42 +00:00
Peter Wood
a912445cc9 Merge branch 'master' of github.com:acedanger/docker 2024-11-20 13:43:25 +00:00
Peter Wood
ded3f76627 renamed kids movies folder 2024-11-20 13:41:09 +00:00
Peter Wood
2e1a5da918 added pinchflat to backup some youtube channels 2024-10-30 14:08:30 +00:00
Peter Wood
e08d9ff144 referenced cloudflare_default network 2024-10-19 02:14:42 +00:00
Peter Wood
2dc27cf8fa added cloudflare_default to each service in compose.yml 2024-10-19 01:59:27 +00:00
Peter Wood
1c0e653f7a Merge branch 'master' of github.com:acedanger/docker 2024-10-19 01:28:30 +00:00
Peter Wood
16431d5bd7 removed vpn credentials from compose.yml 2024-10-19 01:24:19 +00:00
Peter Wood
e9b92a043a mealie 2024-07-28 18:23:33 -04:00
Peter Wood
1a29c32512 added to cloudflare network 2024-07-16 21:20:23 +00:00
Peter Wood
2e501a83ca missing compose files 2024-07-16 19:50:39 +00:00
Peter Wood
cd78b5a52e added another env variable 2024-06-27 01:25:02 +00:00
Peter Wood
dcc92fc501 removed standard notes files 2024-06-04 09:37:15 -04:00
Peter Wood
fb10f6fe43 added environment vars for tokens 2024-06-04 13:31:27 +00:00
Peter Wood
b2cea25c7d removed version 2024-03-30 16:03:25 +00:00
Peter Wood
48dbc19204 added support for environment file; removed user directive; removed quotes from environment variables; referenced .env vars 2024-03-24 01:14:20 +00:00
Peter Wood
7985d9283a added support for environment file 2024-03-24 01:13:08 +00:00
Peter Wood
4b883c87ba added readme for environment files 2024-03-24 00:20:46 +00:00
Peter Wood
abf1370307 ignoring .env 2024-03-24 00:15:53 +00:00
Peter Wood
3fcd626eef removed obsolete 'version' directive 2024-03-23 11:36:18 +00:00
Peter Wood
c347e5c026 removed obsolete 'version' directive 2024-03-22 22:40:16 +00:00
Peter Wood
caa1e0c463 added dozzle 2024-02-03 15:36:04 +00:00
Peter Wood
e373a9bfd4 updated to use latest image; tested on 2/3/24 and it appears to work fine 2024-02-03 15:35:19 +00:00
Peter Wood
a1f9f81f77 removed script bc a container loses it's name when updated this way 2023-12-26 13:17:30 +00:00
Peter Wood
9db7887a69 Merge branch 'master' of github.com:acedanger/docker 2023-12-06 12:00:27 +00:00
Peter Wood
11e3cca248 removed standard-notes 2023-12-06 11:56:30 +00:00
Peter Wood
52d32529dd removed standard notes 2023-12-02 06:29:39 -05:00
Peter Wood
5cb725a0c3 added standard-notes 2023-11-29 18:06:14 +00:00
Peter Wood
3317f07388 added uptime-kuma docker-compose.yml 2023-11-23 15:05:25 +00:00
Peter Wood
e0bdd47f00 Merge remote-tracking branch 'refs/remotes/origin/master' 2023-11-23 14:36:15 +00:00
Peter Wood
b7f27250dd set postgres version number 2023-11-23 14:34:34 +00:00
Peter Wood
8e061c5656 simplified directories ignored 2023-11-23 13:56:01 +00:00
Peter Wood
a720794376 renamed shell script 2023-11-23 13:55:36 +00:00
Peter Wood
ed46611427 Merge remote-tracking branch 'origin' 2023-11-22 20:46:34 -05:00
Peter Wood
f4f934ab85 ignoring cloudflare/tailscale directory 2023-11-22 16:28:40 -05:00
Peter Wood
0be965d941 added restart policy 2023-11-22 16:24:59 -05:00
Peter Wood
f8686a1eec ntfy docker compose; server config and a shell script to push it to the docker container 2023-11-16 17:54:15 +00:00
Peter Wood
d1f64e6b24 updated exposed port; pin version 1.29.2 since 1.30/latest tag wasn't working. the image was stuck in a restarting loop. 2023-11-16 17:53:17 +00:00
Peter Wood
ce338fbe2d ignore any files within each directory 2023-11-16 17:08:52 +00:00
Peter Wood
f51bc5e2b3 removed kasm 2023-11-07 14:38:01 -05:00
Peter Wood
d8760c8aba added kasm 2023-11-07 19:20:51 +00:00
Peter Wood
a3f4447c0c added tailscale and cloudflare to cloudflare tunnels 2023-11-05 19:43:19 -05:00
Peter Wood
7d3b2c6a58 added nginxproxymanager 2023-11-03 20:19:36 +00:00
Peter Wood
4938453905 update media and added vaultwarden 2023-11-02 15:25:14 +00:00
Peter Wood
fb37c6352a added memos; currently live on svr-office 2023-11-02 11:19:34 -04:00
Peter Wood
38065bd878 added golinks; currently live on vperanda 2023-11-02 15:06:01 +00:00
Peter Wood
81611804dd Merge branch 'io' 2023-02-22 20:22:33 +00:00
Peter Wood
d6d52bfc4f added postgres and pgadmin instance 2023-02-11 12:37:45 +00:00
Peter Wood
187ecf45c3 added kids movies root folder to radarr 2023-01-12 21:09:12 +00:00
Peter Wood
9a6d1f4c2b update prowlarr to latest tag 2023-01-09 02:20:26 +00:00
Peter Wood
96f4e04e45 updated volume mappings 2022-12-31 02:56:57 +00:00
Peter Wood
1994280e11 added shell script to update all containers 2022-12-29 21:58:26 +00:00
Peter Wood
e91c477d45 updated volumes 2022-12-17 03:12:17 +00:00
Peter Wood
c3484d07b5 trying out diff volumes 2022-12-05 14:25:53 -05:00
Peter Wood
a109b98ddb volume restructuring 2022-12-05 06:34:40 -05:00
Peter Wood
1efb6e2218 updated volumes 2022-12-05 06:11:53 -05:00
Peter Wood
51a9de57e6 io specific docker-compose with full starr stack 2022-12-05 05:56:49 -05:00
Peter Wood
f799839b66 only connect to US servers 2022-11-17 06:50:25 -05:00
Peter Wood
87eeee9e6a commented out default sabnzb port 2022-11-16 07:28:16 -05:00
Peter Wood
8d9ca8d7c4 Merge pull request #1 from acedanger/windows
windows mount pathing
2022-11-15 07:09:19 -05:00
97 changed files with 5445 additions and 30 deletions

46
.gitignore vendored Normal file
View File

@@ -0,0 +1,46 @@
# ignore environment files
.env
# whatever the hell this file is
core
cloudflare/tailscale/
media/audiobookshelf/
ntfy/*/
vaultwarden/*/
pinchflat/config/
homepage/config/logs/
speedtest/config/
caddy/caddy*
diun/data/
filebrowser/*.db
nginxproxymanager/data
nginxproxymanager/letsencrypt
nginxproxymanager/mysql
opengist/opengist-database
papra/app-data/
# stirling
pdf/stirling/latest/data/
pdf/stirling/latest/logs/
pdf/stirling/latest/config/db/backup/
pdf/stirling/latest/config/*.db
# beszel
beszel/beszel_data/*
beszel/beszel_data/auxiliary.db
# pangolin
pangolin/config/db/db.sqlite
pangolin/config/db/backups/db*.sqlite
pangolin/config/letsencrypt/acme.json
pangolin/config/key
pangolin/config/config.yml.bak
pangolin/installer
pangolin/config/traefik-dashboard/positions/.position
pangolin/config/traefik-dashboard/geoip/*.mmdb
pangolin/config/traefik-dashboard/dashboard/*.db-wal
pangolin/config/traefik/logs/access.log
pangolin/config/traefik-dashboard/dashboard/*.db
pangolin/config/traefik-dashboard/dashboard/*.db-shm

View File

@@ -1,18 +1,28 @@
# Start container
Execute command. # Docker things
## Useful aliases
These are defined in <https://github.com/acedanger/shell>
`dcdn`=`docker compose down`
`dcupd`=`docker compose up -d`
`dcpull`=`docker compose pull`
`dsta`=`docker stop $(docker ps -q)`
`dclf`=`docker compose logs -f`
`dxcit`=`docker container exec -it`
`lzd`=`lazydocker`
## Putting it all together
Shut it down, pull the latest images, start it up in the background, and follow the logs:
```bash ```bash
cd media dcdn && dcpull && dcupd && dclf
docker compose up -d
```
Navigate to http://localhost:8080 to access download client's WebUI.
# View download client logs
Open console for the download client and run this command.
```bash
tail -f config/qBittorrent/logs/qbittorrent.log
``` ```

View File

@@ -0,0 +1,20 @@
# AdGuard Home — network-wide DNS server with ad blocking.
name: adguardhome
services:
  adguardhome:
    image: adguard/adguardhome:latest
    ports:
      # Port mappings are quoted: an unquoted digit:digit pair such as 53:53
      # parses as a base-60 integer under YAML 1.1 loaders.
      - "53:53/tcp" # plain dns over tcp
      - "53:53/udp" # plain dns over udp
      - "3080:80/tcp" # http web interface
      # - "3081:3000/tcp" # initial setup web interface
    volumes:
      - config:/opt/adguardhome/conf # app configuration
      - work:/opt/adguardhome/work # app working directory
    restart: always
    labels:
      - diun.enable=true

volumes:
  config:
    driver: local
  work:
    driver: local

View File

@@ -0,0 +1,12 @@
# Apprise — self-hosted notification gateway (one API, many services).
services:
  apprise:
    image: caronc/apprise:latest
    container_name: apprise
    restart: unless-stopped
    ports:
      # Quoted so the mapping stays a string under YAML 1.1 parsers.
      - "8000:8000"
    volumes:
      - apprise_config:/config

volumes:
  apprise_config:

11
authentik/.env.example Normal file
View File

@@ -0,0 +1,11 @@
PG_PASS=
PG_DB=
PG_USER=
PG_PORT=
AUTHENTIK_SECRET_KEY=
AUTHENTIK_ERROR_REPORTING__ENABLED=
COMPOSE_PORT_HTTP=
COMPOSE_PORT_HTTPS=

102
authentik/compose.yml Normal file
View File

@@ -0,0 +1,102 @@
# authentik identity provider: PostgreSQL + Redis + server + worker.
services:
  postgresql:
    image: docker.io/library/postgres:16-alpine
    restart: unless-stopped
    labels:
      - diun.enable=true
    healthcheck:
      test:
        - CMD-SHELL
        # $$ escapes the dollar so compose does not interpolate; the vars
        # are expanded inside the container instead.
        - pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}
      start_period: 20s
      interval: 30s
      retries: 5
      timeout: 5s
    volumes:
      - database:/var/lib/postgresql/data
    environment:
      POSTGRES_PASSWORD: ${PG_PASS:?database password required}
      POSTGRES_USER: ${PG_USER:-authentik}
      POSTGRES_DB: ${PG_DB:-authentik}
    env_file:
      - .env

  redis:
    image: docker.io/library/redis:alpine
    command: --save 60 1 --loglevel warning
    restart: unless-stopped
    labels:
      - diun.enable=true
    healthcheck:
      test:
        - CMD-SHELL
        - redis-cli ping | grep PONG
      start_period: 20s
      interval: 30s
      retries: 5
      timeout: 3s
    volumes:
      - redis:/data

  server:
    # Server and worker MUST run the same authentik version; the tag default
    # is pinned (was ":latest" here while the worker pinned 2025.4.0, which
    # allowed the two to drift apart).
    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.4.0}
    restart: unless-stopped
    command: server
    labels:
      - diun.enable=true
    environment:
      AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
      AUTHENTIK_REDIS__HOST: redis
      AUTHENTIK_POSTGRESQL__HOST: postgresql
      AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
      AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
      AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
    volumes:
      - ./media:/media
      - ./custom-templates:/templates
    env_file:
      - .env
    ports:
      - ${COMPOSE_PORT_HTTP:-9000}:9000
      - ${COMPOSE_PORT_HTTPS:-9443}:9443
    depends_on:
      postgresql:
        condition: service_healthy
      redis:
        condition: service_healthy

  worker:
    # Same default tag as the server (see note above).
    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.4.0}
    restart: unless-stopped
    command: worker
    labels:
      - diun.enable=true
    environment:
      AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
      AUTHENTIK_REDIS__HOST: redis
      AUTHENTIK_POSTGRESQL__HOST: postgresql
      AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
      AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
      AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
    # `user: root` and the docker socket volume are optional.
    # See more for the docker socket integration here:
    # https://goauthentik.io/docs/outposts/integrations/docker
    # Removing `user: root` also prevents the worker from fixing the permissions
    # on the mounted folders, so when removing this make sure the folders have
    # the correct UID/GID (1000:1000 by default)
    user: root
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./media:/media
      - ./certs:/certs
      - ./custom-templates:/templates
    env_file:
      - .env
    depends_on:
      postgresql:
        condition: service_healthy
      redis:
        condition: service_healthy

volumes:
  database:
    driver: local
  redis:
    driver: local

networks: {}

23
beszel/compose.yaml Normal file
View File

@@ -0,0 +1,23 @@
# Beszel monitoring: hub (web UI) plus the host agent it talks to over a
# shared unix socket.
services:
  beszel:
    image: henrygd/beszel:latest
    container_name: beszel
    restart: unless-stopped
    ports:
      - "8090:8090"
    volumes:
      - ./beszel_data:/beszel_data
      - ./beszel_socket:/beszel_socket

  beszel-agent:
    image: henrygd/beszel-agent:latest
    container_name: beszel-agent
    restart: unless-stopped
    network_mode: host
    volumes:
      - ./beszel_socket:/beszel_socket
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      LISTEN: /beszel_socket/beszel.sock
      # The hub's SSH public key. It must be ONE scalar ("<type> <base64>");
      # the value was previously broken across two lines, which does not
      # parse as intended — quoted and joined here.
      KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIArI9eFVBkR2ftQyQFlj0zvZJI7lpXshN7pxgMflb7Cm"

networks: {}

8
caddy/.env.example Normal file
View File

@@ -0,0 +1,8 @@
# create the API token in https://dash.cloudflare.com/profile/api-tokens
# create auth tokens -
# `Zone.Zone:Read` for all zones, and
# `Zone.DNS:Edit` permissions for ptrwd.com
# https://github.com/caddy-dns/cloudflare
CF_ZONE_READ=
CF_DNS_EDIT=

162
caddy/Caddyfile Normal file
View File

@@ -0,0 +1,162 @@
{
email peter@peterwood.dev
# Add global error logging with INFO level (captures errors but not debug noise)
log {
output stdout
format console
level INFO
}
}
ptrwd.com {
tls {
dns cloudflare {
zone_token {env.CF_ZONE_READ}
api_token {env.CF_DNS_EDIT}
}
}
# this is the wiki
# 100.108.70.63 is ts-racknerd
reverse_proxy 100.108.70.63:8300
}
wiki.ptrwd.com {
tls {
dns cloudflare {
zone_token {env.CF_ZONE_READ}
api_token {env.CF_DNS_EDIT}
}
}
# Route ACME challenges explicitly to be handled internally by Caddy
route /.well-known/acme-challenge/* {
# No directive needed here; Caddy's internal handler takes precedence.
# This prevents the challenge requests from being proxied.
}
# Proxy all other requests to the wiki
route {
# 100.108.70.63 is ts-racknerd
reverse_proxy 100.108.70.63:8300
}
}
jellyfin.peterwood.rocks {
tls {
dns cloudflare {
zone_token {env.CF_ZONE_READ}
api_token {env.CF_DNS_EDIT}
}
}
reverse_proxy host.docker.internal:8096
}
# Serve a simple text message for home.ptrwd.com
home.ptrwd.com {
tls {
dns cloudflare {
zone_token {env.CF_ZONE_READ}
api_token {env.CF_DNS_EDIT}
}
}
# Allow connections only from private ranges and home IP using Cf-Connecting-Ip header
@allowAccess client_ip 192.168.0.0/16 172.16.0.0/12 10.0.0.0/8 162.203.102.154/32
handle @allowAccess {
respond "Welcome home!" 200 {
close # Close the connection after responding
}
}
handle {
respond "Access denied" 403
}
}
# Reverse proxy for sonarr.home.ptrwd.com
sonarr.home.ptrwd.com {
tls {
dns cloudflare {
zone_token {env.CF_ZONE_READ}
api_token {env.CF_DNS_EDIT}
}
}
# Allow connections only from private ranges and home IP
@allowAccess client_ip 192.168.0.0/16 172.16.0.0/12 10.0.0.0/8 162.203.102.154/32
handle @allowAccess {
# 100.114.112.100 is ts-io
reverse_proxy 100.114.112.100:8989
}
handle {
respond 403
}
}
radarr.home.ptrwd.com {
tls {
dns cloudflare {
zone_token {env.CF_ZONE_READ}
api_token {env.CF_DNS_EDIT}
}
}
# Allow connections only from private ranges and home IP
@allowAccess client_ip 192.168.0.0/16 172.16.0.0/12 10.0.0.0/8 162.203.102.154/32
handle @allowAccess {
# 100.114.112.100 is ts-io
reverse_proxy 100.114.112.100:7878
}
handle {
respond 403
}
}
io.home.ptrwd.com {
tls {
dns cloudflare {
zone_token {env.CF_ZONE_READ}
api_token {env.CF_DNS_EDIT}
}
}
# Allow connections only from private ranges and home IP
@allowAccess client_ip 192.168.0.0/16 172.16.0.0/12 10.0.0.0/8 162.203.102.154/32
handle @allowAccess {
# 100.114.112.100 is ts-io
reverse_proxy 100.114.112.100:5001
}
handle {
respond 403
}
}
europa.home.ptrwd.com {
tls {
dns cloudflare {
zone_token {env.CF_ZONE_READ}
api_token {env.CF_DNS_EDIT}
}
}
# Allow connections only from private ranges and home IP
@allowAccess client_ip 192.168.0.0/16 172.16.0.0/12 10.0.0.0/8 162.203.102.154/32
handle @allowAccess {
reverse_proxy host.docker.internal:5001
}
handle {
respond 403
}
}
racknerd.home.ptrwd.com {
tls {
dns cloudflare {
zone_token {env.CF_ZONE_READ}
api_token {env.CF_DNS_EDIT}
}
}
# Allow connections only from private ranges and home IP
@allowAccess client_ip 192.168.0.0/16 172.16.0.0/12 10.0.0.0/8 162.203.102.154/32
handle @allowAccess {
# 100.108.70.63 is ts-racknerd
reverse_proxy 100.108.70.63:5001
}
handle {
respond 403
}
}

9
caddy/Dockerfile Normal file
View File

@@ -0,0 +1,9 @@
# Build stage: compile a custom Caddy binary that bundles the Cloudflare DNS
# provider plugin (needed for DNS-01 ACME challenges on Cloudflare zones).
FROM caddy:2.10-builder AS builder
RUN xcaddy build \
--with github.com/caddy-dns/cloudflare

# Runtime stage: the stock Caddy 2.10 image with the custom binary swapped in.
FROM caddy:2.10
COPY --from=builder /usr/bin/caddy /usr/bin/caddy

24
caddy/docker-compose.yml Normal file
View File

@@ -0,0 +1,24 @@
# Caddy reverse proxy, using the locally built caddy-cloudflare image
# (see ./Dockerfile) so TLS certificates can be issued via Cloudflare DNS.
services:
  caddy:
    image: caddy-cloudflare
    restart: always
    # NOTE(review): with `network_mode: host` below, Docker ignores these
    # port mappings (the container shares the host network stack). Kept as
    # documentation of the ports Caddy binds — confirm before removing.
    ports:
      - "80:80"
      - "443:443"
      - "443:443/udp"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - caddy_data:/data
      - caddy_config:/config
    environment:
      # Cloudflare API tokens consumed by the Caddyfile via {env.CF_*}.
      CF_ZONE_READ: ${CF_ZONE_READ}
      CF_DNS_EDIT: ${CF_DNS_EDIT}
    network_mode: host
    extra_hosts:
      - host.docker.internal:host-gateway
    labels:
      - diun.enable=true

volumes:
  caddy_data:
  caddy_config:

3
cloudflare/.env.example Normal file
View File

@@ -0,0 +1,3 @@
TAILSCALE_AUTHKEY=
TAILSCALE_SOCKET=
CLOUDFLARE_TUNNEL_TOKEN=

View File

@@ -0,0 +1,29 @@
# Cloudflare tunnel fronting this host, with a Tailscale sidecar it depends on.
services:
  cloudflared:
    image: cloudflare/cloudflared:latest
    container_name: cloudflare-tunnel
    restart: unless-stopped
    command: tunnel --no-autoupdate run --token ${CLOUDFLARE_TUNNEL_TOKEN}
    depends_on:
      - tailscale
    labels:
      - diun.enable=true

  tailscale:
    container_name: tailscale
    image: tailscale/tailscale:stable
    volumes:
      # NOTE(review): tailscaled stores its state under /var/lib/tailscale;
      # mounting over all of /var/lib works but is broader than necessary —
      # confirm before narrowing to ./tailscale:/var/lib/tailscale.
      - ./tailscale:/var/lib # State data will be stored in this directory
      - /dev/net/tun:/dev/net/tun # Required for tailscale to work
    cap_add:
      # Required for tailscale to work
      - net_admin
      - sys_module
    environment:
      # Fixed: list-form environment entries must use KEY=VALUE. The previous
      # "TS_AUTHKEY:${TAILSCALE_AUTHKEY}" form is one opaque string with no
      # "=", so neither variable was ever set inside the container.
      - TS_AUTHKEY=${TAILSCALE_AUTHKEY}
      - TS_SOCKET=${TAILSCALE_SOCKET}
    command: tailscaled
    privileged: true
    restart: unless-stopped
    labels:
      - diun.enable=true

networks: {}

9
database/.env.example Normal file
View File

@@ -0,0 +1,9 @@
# PostgreSQL configuration
POSTGRES_USER=
# openssl rand -base64 18
POSTGRES_PASSWORD=
# PgAdmin configuration
PGADMIN_DEFAULT_EMAIL=
PGADMIN_DEFAULT_PASSWORD=
PGADMIN_PORT=5050

View File

@@ -0,0 +1,42 @@
# PostgreSQL plus pgAdmin, on a private bridge network.
services:
  postgres:
    container_name: postgres_container
    image: postgres:15
    environment:
      # Defaults are development placeholders; set real values in .env
      # (see .env.example) — do not deploy with the "changeme" password.
      POSTGRES_USER: ${POSTGRES_USER:-acedanger}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-changeme}
      PGDATA: /data/postgres
    volumes:
      - postgres_data:/data/postgres
    ports:
      - "5432:5432"
    networks:
      - postgres
    restart: unless-stopped
    labels:
      - diun.enable=true

  pgadmin:
    container_name: pgadmin_container
    image: dpage/pgadmin4
    environment:
      PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL:-peter@peterwood.dev}
      PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD:-admin}
      # pgAdmin expects the Python literal string 'False' here; the quoting
      # is intentional — do not convert to a YAML boolean.
      PGADMIN_CONFIG_SERVER_MODE: 'False'
    volumes:
      - pgadmin_data:/var/lib/pgadmin
    ports:
      - "${PGADMIN_PORT:-5050}:80"
    networks:
      - postgres
    restart: unless-stopped
    labels:
      - diun.enable=true

networks:
  postgres:
    driver: bridge

volumes:
  postgres_data:
  pgadmin_data:

3
diun/.env.example Normal file
View File

@@ -0,0 +1,3 @@
# Telegram notification settings
TELEGRAM_TOKEN=your_bot_token_here
TELEGRAM_CHAT_ID=your_chat_id_here

23
diun/compose.yaml Normal file
View File

@@ -0,0 +1,23 @@
services:
  diun:
    image: crazymax/diun:latest
    command: serve
    hostname: diun
    volumes:
      - ./data:/data
      # Watches containers via the Docker socket.
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - TZ=America/New_York
      - DIUN_WATCH_WORKERS=20
      # Check for image updates every 2 hours, with up to 30s jitter.
      - DIUN_WATCH_SCHEDULE=0 */2 * * *
      - DIUN_WATCH_JITTER=30s
      - DIUN_DEFAULTS_NOTIFYON=new,update
      - DIUN_PROVIDERS_DOCKER=true
      - DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT=true
      # Telegram credentials come from .env
      - DIUN_NOTIF_TELEGRAM_TOKEN=${TELEGRAM_TOKEN}
      - DIUN_NOTIF_TELEGRAM_CHATIDS=${TELEGRAM_CHAT_ID}
      #- DIUN_NOTIF_TELEGRAM_TEMPLATEBODY=${TELEGRAM_TEMPLATE}
    labels:
      - diun.enable=true
    restart: unless-stopped

networks: {}

34
dockge/README.md Normal file
View File

@@ -0,0 +1,34 @@
# Dockge
## About
A fancy, easy-to-use and reactive self-hosted `docker-compose.yml` stack-oriented manager.
## Links
🔗 <https://github.com/louislam/dockge>
🔗 <https://dockge.kuma.pet/>
## `compose.yml` example
The `compose.yml` (or `docker-compose.yml`) file needs to be saved in `/opt/dockge/compose.yml` or `/opt/dockge/docker-compose.yml`.
```yaml
services:
dockge:
image: louislam/dockge:1
restart: unless-stopped
ports:
# Host Port : Container Port
- 5001:5001
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./data:/app/data
# ⚠️ 1. FULL path only. No relative path (MUST)
# ⚠️ 2. Left Stacks Path === Right Stacks Path (MUST)
- /home/acedanger/docker/:/home/acedanger/docker/
environment:
# Tell Dockge where is your stacks directory
- DOCKGE_STACKS_DIR=/home/acedanger/docker/
```

View File

@@ -0,0 +1,3 @@
{
"type": "sqlite"
}

BIN
dockge/data/dockge.db Normal file

Binary file not shown.

BIN
dockge/data/dockge.db-shm Normal file

Binary file not shown.

BIN
dockge/data/dockge.db-wal Normal file

Binary file not shown.

20
dockge/docker-compose.yml Normal file
View File

@@ -0,0 +1,20 @@
services:
  dockge:
    image: louislam/dockge:1
    restart: unless-stopped
    ports:
      # Host Port : Container Port — quoted so YAML never misreads a
      # colon-separated number pair (the 1.1 sexagesimal trap).
      - "5001:5001"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./data:/app/data
      # If you want to use private registries, you need to share the auth file with Dockge:
      # - /root/.docker/:/root/.docker
      # Stacks Directory
      # ⚠️ READ IT CAREFULLY. If you did it wrong, your data could end up writing into a WRONG PATH.
      # ⚠️ 1. FULL path only. No relative path (MUST)
      # ⚠️ 2. Left Stacks Path === Right Stacks Path (MUST)
      - /home/acedanger/docker/:/home/acedanger/docker/
    environment:
      # Tell Dockge where is your stacks directory
      - DOCKGE_STACKS_DIR=/home/acedanger/docker/

2
dozzle/.env.example Normal file
View File

@@ -0,0 +1,2 @@
HOSTNAME=

11
dozzle/README.md Normal file
View File

@@ -0,0 +1,11 @@
# Create an environment file
```shell
touch ~/docker/dozzle/.env
nano ~/docker/dozzle/.env
```
## Add the following values
`HOSTNAME`=racknerd|io|europa

14
dozzle/docker-compose.yml Normal file
View File

@@ -0,0 +1,14 @@
services:
  dozzle:
    image: amir20/dozzle:latest
    volumes:
      # Reads container logs via the Docker socket.
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      - "9999:8080"
    environment:
      # DOZZLE_REMOTE_HOST: tcp://ts-rackerd:2375|racknerd,tcp://ts-europa:2375|europa
      DOZZLE_HOSTNAME: ${HOSTNAME}
      # Quoted: Compose expects environment values to be strings; an
      # unquoted YAML boolean can fail schema validation.
      DOZZLE_ENABLE_ACTIONS: "true"
    restart: unless-stopped
    labels:
      - diun.enable=true

14
filebrowser/compose.yaml Normal file
View File

@@ -0,0 +1,14 @@
services:
  filebrowser:
    image: filebrowser/filebrowser:latest
    container_name: filebrowser
    restart: unless-stopped
    volumes:
      # Serve the TV library as the browser root.
      - /mnt/share/media/tv:/srv
      # NOTE(review): database.db should already exist on the host,
      # otherwise Docker creates a directory at this path — confirm.
      - ./database.db:/database.db
    environment:
      - PUID=1000
      - PGID=1000
    ports:
      - "8212:80"

networks: {}

1
gatus/.env.example Normal file
View File

@@ -0,0 +1 @@
# VARIABLE=value #comment

32
gatus/compose.yaml Normal file
View File

@@ -0,0 +1,32 @@
services:
  postgres:
    image: postgres
    volumes:
      - ./data/db:/var/lib/postgresql/data
    ports:
      - "5432:5432"
    environment:
      - POSTGRES_DB=gatus
      - POSTGRES_USER=gatus
      # Overridable via .env; the default preserves the original value.
      # Avoid committing real credentials — set POSTGRES_PASSWORD in .env.
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-sutagabc123}
    networks:
      - gatus-web

  gatus:
    image: twinproduction/gatus:latest
    restart: always
    ports:
      - "6060:8080"
    environment:
      # Must match the postgres service above.
      - POSTGRES_USER=gatus
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-sutagabc123}
      - POSTGRES_DB=gatus
    volumes:
      - ./config:/config
    networks:
      - gatus-web
    depends_on:
      - postgres

networks:
  gatus-web:

144
gitea/backup-gitea.sh Normal file
View File

@@ -0,0 +1,144 @@
#!/bin/bash
# filepath: /home/acedanger/docker/gitea/backup-gitea.sh
# Gitea Backup Script
# Backs up Gitea data and the PostgreSQL database:
#   - SQL dump of the Gitea database
#   - tarballs of the gitea_gitea and gitea_postgres volumes
#   - compose file, .env, a standalone restore.sh and a backup info file
# Backups older than 7 days are pruned.

set -e # Exit on any error

# Configuration
BACKUP_DIR="/home/acedanger/backups/gitea"
DATE=$(date +"%Y%m%d_%H%M%S")
COMPOSE_FILE="/home/acedanger/docker/gitea/docker-compose.yml"
COMPOSE_DIR="/home/acedanger/docker/gitea"

# Create backup directory if it doesn't exist
mkdir -p "$BACKUP_DIR"

echo "Starting Gitea backup at $(date)"

# Change to compose directory
cd "$COMPOSE_DIR"

# Load the stack's .env so POSTGRES_USER/POSTGRES_DB match the deployed
# values instead of silently falling back to gitea/gitea.
if [ -f ".env" ]; then
    set -a
    # shellcheck disable=SC1091
    . ./.env
    set +a
fi

# Create timestamped backup directory
BACKUP_PATH="$BACKUP_DIR/gitea_backup_$DATE"
mkdir -p "$BACKUP_PATH"

# Backup PostgreSQL database
echo "Backing up PostgreSQL database..."
docker compose exec -T db pg_dump -U "${POSTGRES_USER:-gitea}" "${POSTGRES_DB:-gitea}" > "$BACKUP_PATH/database.sql"

# Backup Gitea data volume
echo "Backing up Gitea data volume..."
docker run --rm \
    -v gitea_gitea:/data:ro \
    -v "$BACKUP_PATH":/backup \
    alpine:latest \
    tar czf /backup/gitea_data.tar.gz -C /data .

# Backup PostgreSQL data volume (optional, as we have the SQL dump)
echo "Backing up PostgreSQL data volume..."
docker run --rm \
    -v gitea_postgres:/data:ro \
    -v "$BACKUP_PATH":/backup \
    alpine:latest \
    tar czf /backup/postgres_data.tar.gz -C /data .

# Copy docker-compose configuration
echo "Backing up configuration files..."
cp "$COMPOSE_FILE" "$BACKUP_PATH/"
if [ -f ".env" ]; then
    cp ".env" "$BACKUP_PATH/"
fi

# Create a restore script (quoted heredoc: nothing expands at backup time)
cat > "$BACKUP_PATH/restore.sh" << 'EOF'
#!/bin/bash
# Restore script for Gitea backup
set -e

RESTORE_DIR="$(dirname "$0")"
COMPOSE_DIR="/home/acedanger/docker/gitea"

echo "WARNING: This will stop Gitea and replace all data!"
read -p "Are you sure you want to continue? (yes/no): " confirm
if [ "$confirm" != "yes" ]; then
    echo "Restore cancelled"
    exit 1
fi

cd "$COMPOSE_DIR"

# Stop services
echo "Stopping Gitea services..."
docker compose down

# Remove existing volumes
echo "Removing existing volumes..."
docker volume rm gitea_gitea gitea_postgres || true

# Recreate volumes
echo "Creating volumes..."
docker volume create gitea_gitea
docker volume create gitea_postgres

# Restore Gitea data
echo "Restoring Gitea data..."
docker run --rm \
    -v gitea_gitea:/data \
    -v "$RESTORE_DIR":/backup:ro \
    alpine:latest \
    tar xzf /backup/gitea_data.tar.gz -C /data

# Start database for restore
echo "Starting database for restore..."
docker compose up -d db

# Wait for database to be ready
echo "Waiting for database to be ready..."
sleep 10

# Restore database
echo "Restoring database..."
docker compose exec -T db psql -U "${POSTGRES_USER:-gitea}" -d "${POSTGRES_DB:-gitea}" < "$RESTORE_DIR/database.sql"

# Start all services
echo "Starting all services..."
docker compose up -d

echo "Restore completed!"
EOF

chmod +x "$BACKUP_PATH/restore.sh"

# Create info file
cat > "$BACKUP_PATH/backup_info.txt" << EOF
Gitea Backup Information
========================
Backup Date: $(date)
Backup Location: $BACKUP_PATH
Gitea Version: $(docker compose exec -T server gitea --version | head -1)
PostgreSQL Version: $(docker compose exec -T db postgres --version)

Files included:
- database.sql: PostgreSQL database dump
- gitea_data.tar.gz: Gitea data volume
- postgres_data.tar.gz: PostgreSQL data volume
- docker-compose.yml: Docker compose configuration
- .env: Environment variables (if exists)
- restore.sh: Restore script

To restore this backup, run:
cd $BACKUP_PATH
./restore.sh
EOF

# Cleanup old backups (keep last 7 days)
echo "Cleaning up old backups..."
find "$BACKUP_DIR" -type d -name "gitea_backup_*" -mtime +7 -exec rm -rf {} + 2>/dev/null || true

echo "Backup completed successfully!"
echo "Backup saved to: $BACKUP_PATH"
echo "Backup size: $(du -sh "$BACKUP_PATH" | cut -f1)"

84
gitea/docker-compose.yml Normal file
View File

@@ -0,0 +1,84 @@
services:
  server:
    image: docker.gitea.com/gitea:latest
    container_name: gitea
    environment:
      - USER_UID=${USER_UID}
      - USER_GID=${USER_GID}
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=db:5432
      # Must match the database the db service creates (POSTGRES_DB);
      # this was previously wired to POSTGRES_USER, which only works
      # when user and database happen to share a name.
      - GITEA__database__NAME=${POSTGRES_DB}
      - GITEA__database__USER=${POSTGRES_USER}
      - GITEA__database__PASSWD=${POSTGRES_PASSWORD}
    restart: always
    networks:
      - gitea
    volumes:
      - gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "${GITEA_HTTP_PORT:-3500}:3000"
      - "${GITEA_SSH_PORT:-2229}:22"
    depends_on:
      - db
    labels:
      - diun.enable=true
    healthcheck:
      # Gitea listens on 3000 inside the container; a plain
      # http://localhost (port 80) check would always fail.
      test:
        - CMD
        - curl
        - -f
        - http://localhost:3000
      interval: 10s
      retries: 3
      start_period: 30s
      timeout: 10s

  db:
    image: docker.io/library/postgres:14
    restart: always
    environment:
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - POSTGRES_DB=${POSTGRES_DB}
    networks:
      - gitea
    volumes:
      - postgres:/var/lib/postgresql/data

  runner:
    image: gitea/act_runner:latest
    container_name: gitea-runner
    restart: always
    networks:
      - gitea
    volumes:
      - runner:/data
      - /var/run/docker.sock:/var/run/docker.sock
      - ./runner-config.yaml:/data/config.yaml:ro
    environment:
      - GITEA_INSTANCE_URL=http://server:3000
      - GITEA_RUNNER_REGISTRATION_TOKEN=${GITEA_RUNNER_REGISTRATION_TOKEN}
      - GITEA_RUNNER_NAME=docker-runner
      - CONFIG_FILE=/data/config.yaml
    # Register once (marker file /data/.runner), then run the daemon.
    # $$ escapes compose interpolation so the shell expands the token.
    command: >
      sh -c "
      if [ ! -f /data/.runner ]; then
      act_runner register --no-interactive --instance http://server:3000 --token $${GITEA_RUNNER_REGISTRATION_TOKEN} --name docker-runner;
      fi;
      act_runner --config /data/config.yaml daemon
      "
    depends_on:
      - server
    labels:
      - diun.enable=true

networks:
  gitea:
    external: false

volumes:
  gitea:
  postgres:
  runner:

24
gitea/restore.sh Executable file
View File

@@ -0,0 +1,24 @@
#!/bin/bash
# RESTORE SCRIPT
# Restores the Gitea database dump and the Gitea/runner data volumes
# from backup archives in the current directory.

set -e

echo "WARNING: This will overwrite your current Gitea/DB/Runner data."
read -p "Are you sure? (y/N): " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi

docker compose down

echo "Restoring Database Volume..."
docker compose up -d db
echo "Waiting for DB to initialize..."
sleep 15
# Redirect instead of 'cat | ...'; credentials fall back to gitea/gitea.
docker compose exec -T db psql -U "${POSTGRES_USER:-gitea}" -d "${POSTGRES_DB:-gitea}" < database.sql

echo "Restoring Gitea Files..."
docker run --rm --volumes-from gitea -v "$(pwd)":/backup alpine tar xzf /backup/gitea_data.tar.gz -C /data

echo "Restoring Runner Files..."
docker run --rm --volumes-from gitea-runner -v "$(pwd)":/backup alpine tar xzf /backup/runner_data.tar.gz -C /data

echo "Restarting stack..."
docker compose up -d
echo "Restore Complete."

15
gitea/runner-config.yaml Normal file
View File

@@ -0,0 +1,15 @@
# act_runner configuration (mounted read-only at /data/config.yaml).
log:
  level: info

runner:
  # Run at most one job at a time.
  capacity: 1
  timeout: 3h

container:
  # Use the gitea network so job containers can resolve the 'server' hostname
  network: gitea_gitea
  privileged: false
  options: ""
  workdir_parent: ""
  valid_volumes: []
  docker_host: ""

1
golinks/.env.example Normal file
View File

@@ -0,0 +1 @@
TAILSCALE_AUTHKEY=

View File

@@ -0,0 +1,14 @@
services:
  golink:
    container_name: golink
    restart: unless-stopped
    image: ghcr.io/tailscale/golink:main
    environment:
      # List-form environment entries must use '=', not ':'.
      # 'TS_AUTHKEY:${TS_AUTHKEY}' would never set the variable.
      - TS_AUTHKEY=${TS_AUTHKEY}
    volumes:
      - golinks_data:/home/nonroot
    labels:
      - diun.enable=true

volumes:
  golinks_data:

245
golinks/links.json Normal file
View File

@@ -0,0 +1,245 @@
{
"Short": "ads",
"Long": "http://192.168.68.67:3080/",
"Created": "2025-01-13T12:14:08Z",
"LastEdit": "2025-01-13T12:14:08Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "audiobooks",
"Long": "http://books.peterwood.rocks",
"Created": "2024-12-28T02:00:11Z",
"LastEdit": "2025-04-27T19:17:02Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "books",
"Long": "http://books.peterwood.rocks",
"Created": "2024-12-30T17:26:19Z",
"LastEdit": "2024-12-31T22:59:44Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "calendar",
"Long": "https://calendar.google.com/calendar/u/0/r/{{if .Path}}{{QueryEscape .Path}}{{end}}",
"Created": "2024-12-06T21:46:16Z",
"LastEdit": "2024-12-06T21:46:16Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "cf",
"Long": "https://dash.cloudflare.com/",
"Created": "2025-04-25T12:00:31Z",
"LastEdit": "2025-05-20T19:06:01Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "code",
"Long": "https://vscode.dev/tunnel/{{.Path}}",
"Created": "2025-05-03T01:54:26Z",
"LastEdit": "2025-05-20T19:06:06Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "desktop-kvm",
"Long": "http://192.168.68.78",
"Created": "2025-05-13T12:51:06Z",
"LastEdit": "2025-05-13T12:52:05Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "dl",
"Long": "http://ts-io:8080/sabnzbd/#queue-tab",
"Created": "2024-12-06T21:47:01Z",
"LastEdit": "2024-12-06T21:47:01Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "dlv",
"Long": "https://download.peterwood.rocks/",
"Created": "2025-01-19T15:51:15Z",
"LastEdit": "2025-01-19T15:51:15Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "docker",
"Long": "https://{{if .Path}}{{QueryEscape .Path}}{{end}}.acedanger.com",
"Created": "2024-12-06T23:47:22Z",
"LastEdit": "2025-05-21T14:44:49Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "download",
"Long": "https://download.peterwood.rocks",
"Created": "2025-01-19T15:29:28Z",
"LastEdit": "2025-01-19T15:29:28Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "europa-kvm",
"Long": "http://192.168.68.92",
"Created": "2025-05-06T16:16:18Z",
"LastEdit": "2025-05-06T16:29:24Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "fastmail",
"Long": "https://app.fastmail.com/mail/Inbox/?u=92864047",
"Created": "2025-04-21T13:21:40Z",
"LastEdit": "2025-05-20T19:05:55Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "fastmailrules",
"Long": "https://app.fastmail.com/settings/filters?u=92864047",
"Created": "2025-01-29T17:30:44Z",
"LastEdit": "2025-01-29T17:30:44Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "gh",
"Long": "https://github.com/acedanger{{if .Path}}/{{QueryEscape .Path}}{{end}}",
"Created": "2024-12-06T23:50:47Z",
"LastEdit": "2025-05-03T14:24:13Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "jellyfin",
"Long": "https://jellyfin.peterwood.rocks",
"Created": "2025-04-28T15:17:05Z",
"LastEdit": "2025-05-20T19:05:32Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "logs",
"Long": "http://logs.{{if .Path}}{{QueryEscape .Path}}{{end}}.acedanger.com",
"Created": "2025-04-29T19:06:55Z",
"LastEdit": "2025-05-22T01:21:37Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "medialogs",
"Long": "http://ts-io:8181",
"Created": "2025-01-07T21:18:06Z",
"LastEdit": "2025-01-07T21:18:06Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "memos",
"Long": "https://memos.peterwood.rocks",
"Created": "2025-04-27T18:58:12Z",
"LastEdit": "2025-04-27T19:18:45Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "movies",
"Long": "https://movies.acedanger.com/{{if .Path}}add/new{{end}}",
"Created": "2024-12-06T23:51:18Z",
"LastEdit": "2025-01-19T16:12:49Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "n8n",
"Long": "https://n8n.peterwood.rocks",
"Created": "2025-06-07T13:40:03Z",
"LastEdit": "2025-06-07T13:40:03Z",
"Owner": "tagged-devices"
}
{
"Short": "nas",
"Long": "https://ts-peranda-nas.whale-woodpecker.ts.net",
"Created": "2024-12-06T23:51:59Z",
"LastEdit": "2024-12-06T23:51:59Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "pangolin",
"Long": "https://pangolin.acedanger.com/",
"Created": "2025-05-20T18:30:34Z",
"LastEdit": "2025-05-20T18:30:34Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "paste",
"Long": "https://paste.acedanger.com/",
"Created": "2025-05-26T01:43:55Z",
"LastEdit": "2025-05-26T01:43:55Z",
"Owner": "tagged-devices"
}
{
"Short": "pdf",
"Long": "http://pdf.peterwood.rocks",
"Created": "2024-12-29T14:16:26Z",
"LastEdit": "2025-01-01T19:27:49Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "photos",
"Long": "https://photos.peterwood.rocks",
"Created": "2025-03-25T00:56:00Z",
"LastEdit": "2025-05-20T19:04:54Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "printer",
"Long": "http://192.168.68.58",
"Created": "2024-12-06T23:52:26Z",
"LastEdit": "2025-01-27T16:52:52Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "proton",
"Long": "https://mail.proton.me/u/1/inbox",
"Created": "2024-12-06T23:52:51Z",
"LastEdit": "2024-12-06T23:52:51Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "search",
"Long": "https://www.google.com/{{if .Path}}search?q={{QueryEscape .Path}}{{end}}",
"Created": "2024-12-06T23:54:18Z",
"LastEdit": "2024-12-06T23:54:18Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "sso",
"Long": "https://console.cloud.google.com/apis/credentials?project=scenic-hydra-354612",
"Created": "2025-04-29T12:13:16Z",
"LastEdit": "2025-05-20T19:05:43Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "tailscale",
"Long": "https://login.tailscale.com/admin/machines",
"Created": "2025-04-21T13:20:17Z",
"LastEdit": "2025-05-20T19:05:48Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "tunnels",
"Long": "https://one.dash.cloudflare.com/5ac4ed948de07da22e370484567cc53f/networks/tunnels?search=",
"Created": "2024-12-31T22:59:19Z",
"LastEdit": "2024-12-31T22:59:19Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "tv",
"Long": "https://tv.acedanger.com/{{if .Path}}add/new{{end}}",
"Created": "2024-12-06T23:51:32Z",
"LastEdit": "2025-01-19T16:13:06Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "up",
"Long": "https://up.peterwood.rocks",
"Created": "2025-01-19T15:42:36Z",
"LastEdit": "2025-01-19T15:42:36Z",
"Owner": "acedanger49@gmail.com"
}
{
"Short": "wiki",
"Long": "https://wiki.peterwood.rocks",
"Created": "2024-12-06T23:53:26Z",
"LastEdit": "2024-12-06T23:53:26Z",
"Owner": "acedanger49@gmail.com"
}

23
immich/.env.example Normal file
View File

@@ -0,0 +1,23 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=/mnt/share/media/immich/uploads
# The location where your database files are stored
DB_DATA_LOCATION=./database
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
TZ=America/New_York
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
# openssl rand -base64 18
DB_PASSWORD=postgres
# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=postgres
DB_DATABASE_NAME=immich

71
immich/docker-compose.yml Normal file
View File

@@ -0,0 +1,71 @@
#
# WARNING: Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
#
name: immich

services:
  immich-server:
    container_name: immich_server
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    # extends:
    #   file: hwaccel.transcoding.yml
    #   service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
    volumes:
      # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
      - ${UPLOAD_LOCATION}:/data
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
      - 2283:2283
    depends_on:
      - redis
      - database
    restart: always
    healthcheck:
      disable: false

  immich-machine-learning:
    container_name: immich_machine_learning
    # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
    # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
    #   file: hwaccel.ml.yml
    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
    volumes:
      - model-cache:/cache
    env_file:
      - .env
    restart: always
    healthcheck:
      disable: false
    depends_on:
      - redis
      - database

  redis:
    container_name: immich_redis
    image: docker.io/redis:6.2-alpine@sha256:905c4ee67b8e0aa955331960d2aa745781e6bd89afc44a8584bfd13bc890f0ae
    healthcheck:
      test: redis-cli ping || exit 1
    restart: always

  database:
    container_name: immich_postgres
    image: ghcr.io/immich-app/postgres:14-vectorchord0.3.0-pgvectors0.2.0
    environment:
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: --data-checksums
      # HDD mode is enabled here; remove this line if the database is
      # stored on SSDs.
      DB_STORAGE_TYPE: 'HDD'
    volumes:
      # Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
      - ${DB_DATA_LOCATION}:/var/lib/postgresql/data
    restart: always

volumes:
  model-cache: null

networks: {}

112
jellyfin/README.md Normal file
View File

@@ -0,0 +1,112 @@
# Jellyfin Docker Migration Guide
## ✅ Current Status
Your Jellyfin Docker container is now running successfully at http://localhost:8096
## 🎯 What's Working
- ✅ Docker Compose setup with named volumes
- ✅ Media libraries properly mapped:
  - TV: `/mnt/share/media/tv` → `/data/tv`
  - Anime: `/mnt/share/media/anime` → `/data/anime`
  - Movies: `/mnt/share/media/movies` → `/data/movies`
  - Kids Movies: `/mnt/share/media/movies_kids` → `/data/movies_kids`
  - Babies: `/mnt/share/media/babies` → `/data/babies`
- ✅ Network ports configured (8096, 8920, 7359, 1900)
- ✅ Container health checks passing
## 🔧 Next Steps
### 1. Initial Setup
1. Open http://localhost:8096 in your browser
2. Complete the initial setup wizard
3. Create your admin user account
4. Add your media libraries using the paths above
### 2. Migrate Your Old Configuration (Optional)
If you want to copy specific settings from your bare metal installation:
```bash
# Stop the container
docker compose down
# Copy specific config files (be selective to avoid database issues)
sudo docker cp /etc/jellyfin/branding.xml jellyfin_volume:/config/config/
sudo docker cp /etc/jellyfin/encoding.xml jellyfin_volume:/config/config/
sudo docker cp /etc/jellyfin/network.xml jellyfin_volume:/config/config/
# Restart the container
docker compose up -d
```
### 3. Plugin Migration
Your old plugins were detected but may need to be reinstalled:
- Chapter Segments Provider
- Open Subtitles
- Playback Reporting
- Reports
- Session Cleaner
- Webhook
### 4. GPU Transcoding (When Ready)
Once you fix your NVIDIA drivers, uncomment these lines in `docker-compose.yml`:
```yaml
# NVIDIA GPU settings (uncomment when drivers are working)
# environment:
# - NVIDIA_VISIBLE_DEVICES=all
# - NVIDIA_DRIVER_CAPABILITIES=compute,video,utility
# runtime: nvidia
```
## 🛠️ NVIDIA Driver Fix
To enable GPU transcoding, you'll need to fix your NVIDIA drivers:
```bash
# Check what driver you need
ubuntu-drivers devices
# Install recommended driver
sudo ubuntu-drivers autoinstall
# Or install specific driver
sudo apt install nvidia-driver-470 # (or whatever version is recommended)
# Reboot
sudo reboot
```
## 📋 File Locations
- **Docker Compose**: `/home/acedanger/docker/jellyfin/docker-compose.yml`
- **Migration Script**: `/home/acedanger/docker/jellyfin/migrate.sh`
- **Config Volume**: `jellyfin_jellyfin_config`
- **Cache Volume**: `jellyfin_jellyfin_cache`
## 🔄 Management Commands
```bash
# Start Jellyfin
docker compose up -d
# Stop Jellyfin
docker compose down
# View logs
docker compose logs -f jellyfin
# Restart Jellyfin
docker compose restart
# Update Jellyfin
docker compose pull && docker compose up -d
```
## 🚫 Old Bare Metal Service
To prevent conflicts, disable the old systemd service:
```bash
sudo systemctl stop jellyfin
sudo systemctl disable jellyfin
```
## 🎬 Access Points
- **Web Interface**: http://localhost:8096
- **HTTPS** (if configured): https://localhost:8920
- **Server IP**: Your container is accessible at `10.0.12.2:8096` from the network
Your Jellyfin migration is complete! The container will automatically restart if your system reboots.

View File

@@ -0,0 +1,46 @@
services:
  jellyfin:
    #image: jellyfin/jellyfin:latest
    image: lscr.io/linuxserver/jellyfin:latest
    container_name: jellyfin
    restart: unless-stopped
    ports:
      - "8096:8096" # HTTP web UI
      - "8920:8920" # HTTPS web UI (optional)
      - "7359:7359/udp" # Discovery (optional)
      - "1900:1900/udp" # DLNA (optional)
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
      - JELLYFIN_PublishedServerUrl=http://192.168.68.67:8096
      - JELLYFIN_LOG_LEVEL=Warning
      # NVIDIA GPU settings (commented out due to driver issues)
      # - NVIDIA_VISIBLE_DEVICES=all
      # - NVIDIA_DRIVER_CAPABILITIES=compute,video,utility
    volumes:
      - jellyfin_config:/config
      - jellyfin_cache:/cache
      # Media directories
      - /mnt/share/media/anime:/data/anime
      - /mnt/share/media/tv:/data/tv
      - /mnt/share/media/babies:/data/babies
      - /mnt/share/media/movies:/data/movies
      - /mnt/share/media/movies_kids:/data/movies_kids
    labels:
      - diun.enable=true
    # Hardware acceleration devices (commented out - no GPU drivers available)
    # devices:
    #   - /dev/dri:/dev/dri # For Intel/AMD GPU
    # Runtime for NVIDIA GPU support (commented out due to driver issues)
    # runtime: nvidia

volumes:
  jellyfin_config:
    driver: local
  jellyfin_cache:
    driver: local

networks:
  default:
    name: jellyfin_network

101
jellyfin/migrate.sh Executable file
View File

@@ -0,0 +1,101 @@
#!/bin/bash
# Jellyfin Docker Migration Assistant
# This script helps migrate your bare metal Jellyfin installation to Docker

echo "=== Jellyfin Docker Migration Assistant ==="
echo

# Check if Jellyfin service is running
if systemctl is-active --quiet jellyfin; then
    echo "⚠️  Jellyfin service is currently running. Please stop it before migration:"
    echo "   sudo systemctl stop jellyfin"
    echo "   sudo systemctl disable jellyfin"
    echo
    exit 1
fi

# Check for NVIDIA drivers and container toolkit
echo "🔍 Checking NVIDIA GPU support..."
if command -v nvidia-smi &> /dev/null; then
    echo "✅ NVIDIA drivers detected"
    if command -v nvidia-container-runtime &> /dev/null; then
        echo "✅ NVIDIA Container Toolkit detected"
        echo "   You can uncomment the NVIDIA GPU sections in docker compose.yml"
    else
        echo "⚠️  NVIDIA Container Toolkit not found. Install it for GPU transcoding:"
        echo "   https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html"
    fi
else
    echo "  NVIDIA drivers not detected. GPU transcoding will not be available."
fi
echo

# Check Docker and Docker Compose
echo "🔍 Checking Docker installation..."
if ! command -v docker &> /dev/null; then
    echo "❌ Docker not found. Please install Docker first."
    exit 1
fi

# 'docker compose' is a docker subcommand, so probe it directly;
# 'command -v docker compose' only resolves the name 'docker'.
if ! docker compose version &> /dev/null; then
    echo "❌ Docker Compose not found. Please install Docker Compose first."
    exit 1
fi
echo "✅ Docker and Docker Compose are available"
echo

# Migration steps
echo "📋 Migration Steps:"
echo "1. Copy your existing Jellyfin data to Docker volumes"
echo "2. Update media paths in docker compose.yml"
echo "3. Configure GPU transcoding (if applicable)"
echo "4. Start the container"
echo

# Offer to copy existing data
read -p "Do you want to copy existing Jellyfin data? (y/n): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
    echo "🔄 Creating Docker volumes and copying data..."

    # Create the container to create volumes
    docker compose up --no-start

    # Copy configuration data
    if [ -d "/var/lib/jellyfin" ]; then
        echo "📁 Copying Jellyfin data directory..."
        sudo docker cp /var/lib/jellyfin/. jellyfin:/config/
    fi

    # Copy cache if it exists
    if [ -d "/var/cache/jellyfin" ]; then
        echo "📁 Copying Jellyfin cache directory..."
        sudo docker cp /var/cache/jellyfin/. jellyfin:/cache/
    fi

    echo "✅ Data migration completed"
fi

echo
echo "🎬 Next steps:"
echo "1. Edit docker compose.yml and update the media volume paths:"
echo "   - Replace '/path/to/your/movies' with your actual movie directory"
echo "   - Replace '/path/to/your/tv' with your actual TV shows directory"
echo "   - Replace '/path/to/your/music' with your actual music directory"
echo
echo "2. If you have NVIDIA GPU and drivers installed:"
echo "   - Uncomment the NVIDIA GPU sections in docker compose.yml"
echo
echo "3. Start Jellyfin:"
echo "   docker compose up -d"
echo
echo "4. Access Jellyfin at: http://localhost:8096"
echo
echo "5. Optional: Stop and disable the systemd service:"
echo "   sudo systemctl stop jellyfin"
echo "   sudo systemctl disable jellyfin"

12
karakeep/.env.example Normal file
View File

@@ -0,0 +1,12 @@
KARAKEEP_VERSION=release
# openssl rand -base64 18
NEXTAUTH_SECRET=
NEXTAUTH_URL=
MEILI_MASTER_KEY=
# the settings below assume we're using Google Gemini
OPENAI_API_KEY=
OPENAI_BASE_URL=https://generativelanguage.googleapis.com/v1beta
INFERENCE_TEXT_MODEL=gemini-2.0-flash
INFERENCE_IMAGE_MODEL=gemini-2.0-flash

View File

@@ -0,0 +1,49 @@
name: karakeep

services:
  karakeep:
    image: ghcr.io/karakeep-app/karakeep:${KARAKEEP_VERSION:-release}
    restart: unless-stopped
    volumes:
      - hoarder_data:/data
    ports:
      - "3000:3000"
    environment:
      MEILI_ADDR: http://meilisearch:7700
      BROWSER_WEB_URL: http://chrome:9222
      NEXTAUTH_SECRET: ${NEXTAUTH_SECRET}
      OPENAI_API_KEY: ${OPENAI_API_KEY}
      NEXTAUTH_URL: ${NEXTAUTH_URL}
      MEILI_MASTER_KEY: ${MEILI_MASTER_KEY}
      OPENAI_BASE_URL: ${OPENAI_BASE_URL}
      INFERENCE_TEXT_MODEL: ${INFERENCE_TEXT_MODEL}
      INFERENCE_IMAGE_MODEL: ${INFERENCE_IMAGE_MODEL}
      DATA_DIR: /data
    labels:
      - diun.enable=true

  chrome:
    image: gcr.io/zenika-hub/alpine-chrome:124
    restart: unless-stopped
    labels:
      - diun.enable=true
    command:
      - --no-sandbox
      - --disable-gpu
      - --disable-dev-shm-usage
      - --remote-debugging-address=0.0.0.0
      - --remote-debugging-port=9222
      - --hide-scrollbars

  meilisearch:
    image: getmeili/meilisearch:v1.13.3
    restart: unless-stopped
    environment:
      # Quoted: Compose requires environment values to be strings; an
      # unquoted YAML boolean can fail schema validation.
      MEILI_NO_ANALYTICS: "true"
    volumes:
      - hoarder_meilisearch:/meili_data
    labels:
      - diun.enable=true

volumes:
  hoarder_meilisearch:
    external: true
  hoarder_data:
    external: true

networks: {}

9
margotwood/Caddyfile Normal file
View File

@@ -0,0 +1,9 @@
# Global options: TLS is terminated elsewhere, so disable automatic HTTPS.
{
	auto_https off
}

# Plain-HTTP static file server on port 80.
:80 {
	root * /usr/share/caddy
	encode gzip
	file_server
}

View File

@@ -0,0 +1,16 @@
services:
  caddy:
    image: caddy:2-alpine
    restart: unless-stopped
    ports:
      - "8083:80"
      - "8243:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      # Site content is served from the stack directory itself.
      - ./:/usr/share/caddy
      - caddy_data:/data
      - caddy_config:/config

volumes:
  caddy_data: null
  caddy_config: null

networks: {}

View File

@@ -0,0 +1,430 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Toddler Math Flash Cards - Addition Fun!</title>
<link href="https://fonts.googleapis.com/css2?family=Nunito:wght@400;600;700;800&display=swap" rel="stylesheet">
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: 'Nunito', sans-serif;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
min-height: 100vh;
display: flex;
justify-content: center;
align-items: flex-start;
color: #333;
padding: 10px 0;
}
.app-container {
background: white;
border-radius: 20px;
box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1);
padding: 20px;
text-align: center;
max-width: 500px;
width: 95%;
margin: 10px auto;
}
.title {
color: #4a5568;
font-size: 2.2em;
margin-bottom: 15px;
text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.1);
}
.flashcard {
background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
border-radius: 15px;
padding: 25px;
margin: 15px 0;
box-shadow: 0 10px 20px rgba(0, 0, 0, 0.1);
transition: transform 0.3s ease;
}
.flashcard:hover {
transform: scale(1.05);
}
.equation {
font-size: 3.5em;
font-weight: bold;
color: white;
text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.3);
margin-bottom: 10px;
}
.answer-section {
margin: 20px 0;
}
.answer-input {
font-size: 2.5em;
padding: 12px 15px;
border: 4px solid #667eea;
border-radius: 15px;
text-align: center;
width: 130px;
margin: 0 auto 15px auto;
display: block;
font-family: 'Nunito', sans-serif;
}
.answer-input:focus {
outline: none;
border-color: #f093fb;
box-shadow: 0 0 20px rgba(240, 147, 251, 0.3);
}
.buttons {
display: flex;
gap: 15px;
justify-content: center;
margin-top: 20px;
}
.btn {
font-size: 1.5em;
padding: 15px 30px;
border: none;
border-radius: 50px;
cursor: pointer;
font-family: 'Nunito', sans-serif;
font-weight: bold;
transition: all 0.3s ease;
box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2);
}
.btn-check {
background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
color: white;
}
.btn-next {
background: linear-gradient(135deg, #43e97b 0%, #38f9d7 100%);
color: white;
}
.btn:hover {
transform: translateY(-3px);
box-shadow: 0 8px 20px rgba(0, 0, 0, 0.3);
}
.feedback {
font-size: 2.5em;
font-weight: bold;
margin: 20px 0;
padding: 20px;
border-radius: 15px;
transition: all 0.5s ease;
}
.correct {
background: linear-gradient(135deg, #84fab0 0%, #8fd3f4 100%);
color: #2d5a27;
}
.incorrect {
background: linear-gradient(135deg, #ffecd2 0%, #fcb69f 100%);
color: #8b4513;
}
.score {
background: rgba(255, 255, 255, 0.9);
padding: 8px 15px;
border-radius: 20px;
font-size: 1em;
font-weight: bold;
color: #4a5568;
margin-bottom: 15px;
display: inline-block;
}
.celebration {
font-size: 3em;
animation: bounce 1s infinite;
}
@keyframes bounce {
0%,
20%,
50%,
80%,
100% {
transform: translateY(0);
}
40% {
transform: translateY(-30px);
}
60% {
transform: translateY(-15px);
}
}
.number-buttons {
display: grid;
grid-template-columns: repeat(4, 1fr);
gap: 10px;
margin: 15px 0;
}
.number-buttons .number-btn:last-child {
grid-column: span 2;
}
.number-btn {
font-size: 1.8em;
padding: 12px;
border: 3px solid #667eea;
border-radius: 12px;
background: white;
cursor: pointer;
font-family: 'Nunito', sans-serif;
font-weight: bold;
color: #667eea;
transition: all 0.2s ease;
min-height: 50px;
}
.number-btn:hover {
background: #667eea;
color: white;
transform: scale(1.1);
}
@media (max-width: 600px) {
.app-container {
padding: 15px;
margin: 5px auto;
}
.title {
font-size: 1.8em;
margin-bottom: 10px;
}
.equation {
font-size: 2.8em;
}
.answer-input {
font-size: 2.2em;
width: 110px;
}
.number-btn {
font-size: 1.6em;
padding: 10px;
min-height: 45px;
}
.btn {
font-size: 1.3em;
padding: 12px 25px;
}
}
@media (max-height: 700px) {
.flashcard {
padding: 20px;
margin: 10px 0;
}
.equation {
font-size: 3em;
}
.number-btn {
font-size: 1.6em;
padding: 8px;
min-height: 40px;
}
}
</style>
</head>
<body>
<div class="app-container">
<div class="score" id="score">Score: 0/0</div>
<h1 class="title">🎓 Addition Practice</h1>
<div class="flashcard">
<div class="equation" id="equation">5 + 3 = ?</div>
</div>
<div class="answer-section">
<input type="number" class="answer-input" id="answerInput" placeholder="?" min="0" max="20">
<div class="number-buttons" id="numberButtons">
<button class="number-btn" onclick="inputNumber(0)">0</button>
<button class="number-btn" onclick="inputNumber(1)">1</button>
<button class="number-btn" onclick="inputNumber(2)">2</button>
<button class="number-btn" onclick="inputNumber(3)">3</button>
<button class="number-btn" onclick="inputNumber(4)">4</button>
<button class="number-btn" onclick="inputNumber(5)">5</button>
<button class="number-btn" onclick="inputNumber(6)">6</button>
<button class="number-btn" onclick="inputNumber(7)">7</button>
<button class="number-btn" onclick="inputNumber(8)">8</button>
<button class="number-btn" onclick="inputNumber(9)">9</button>
<button class="number-btn" onclick="inputNumber(10)">10</button>
<button class="number-btn" onclick="clearAnswer()"
style="background: #ff6b6b; color: white;">Clear</button>
</div>
</div>
<div class="feedback" id="feedback" style="display: none;"></div>
<div class="buttons">
<button class="btn btn-check" onclick="checkAnswer()">Check Answer! 🎯</button>
<button class="btn btn-next" onclick="nextCard()" style="display: none;" id="nextBtn">Next Card! ➡️</button>
</div>
</div>
<script>
let currentNum1, currentNum2, correctAnswer;
let totalQuestions = 0;
let correctAnswers = 0;
// Deal a new addition card and reset the UI for a fresh answer.
// Writes module-level state: currentNum1, currentNum2, correctAnswer.
function generateNewCard () {
  // Generate two random addends, each 0-10 inclusive (Math.random() * 11).
  currentNum1 = Math.floor(Math.random() * 11);
  currentNum2 = Math.floor(Math.random() * 11);
  correctAnswer = currentNum1 + currentNum2;
  // Update the equation display
  document.getElementById('equation').textContent = `${currentNum1} + ${currentNum2} = ?`;
  // Reset the input and feedback
  document.getElementById('answerInput').value = '';
  document.getElementById('feedback').style.display = 'none';
  document.getElementById('nextBtn').style.display = 'none';
  // Don't focus on input to prevent keyboard from showing on mobile
}
// Append a tapped number-pad digit (or "10") to the answer field.
// A fresh or lone-zero field is replaced outright; otherwise the value is
// appended, capped at two characters.
function inputNumber (num) {
  const field = document.getElementById('answerInput');
  const existing = field.value;
  if (existing === '' || existing === '0') {
    field.value = num;
    return;
  }
  if (existing.length < 2) { // cap at 2 digits
    field.value = existing + num;
  }
}
function clearAnswer () {
document.getElementById('answerInput').value = '';
}
function checkAnswer () {
const userAnswer = parseInt(document.getElementById('answerInput').value);
const feedbackEl = document.getElementById('feedback');
if (isNaN(userAnswer)) {
alert('Please enter a number! 😊');
return;
}
totalQuestions++;
if (userAnswer === correctAnswer) {
correctAnswers++;
feedbackEl.innerHTML = `<div class="celebration">🎉</div>Awesome! ${currentNum1} + ${currentNum2} = ${correctAnswer}`;
feedbackEl.className = 'feedback correct';
// Play success sound (if browser supports it)
playSound('success');
} else {
feedbackEl.innerHTML = `<div>Try again! 💪</div>${currentNum1} + ${currentNum2} = ${correctAnswer}`;
feedbackEl.className = 'feedback incorrect';
// Play try again sound (if browser supports it)
playSound('tryAgain');
}
feedbackEl.style.display = 'block';
document.getElementById('nextBtn').style.display = 'inline-block';
updateScore();
}
function nextCard () {
generateNewCard();
}
// Refresh the running-score badge from the module-level counters,
// guarding against divide-by-zero before the first answer.
function updateScore () {
  let percentage = 0;
  if (totalQuestions > 0) {
    percentage = Math.round((correctAnswers / totalQuestions) * 100);
  }
  const badge = document.getElementById('score');
  badge.textContent = `Score: ${correctAnswers}/${totalQuestions} (${percentage}%)`;
}
function playSound (type) {
// Create audio context for simple beep sounds
try {
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
const oscillator = audioContext.createOscillator();
const gainNode = audioContext.createGain();
oscillator.connect(gainNode);
gainNode.connect(audioContext.destination);
if (type === 'success') {
// Happy ascending notes
oscillator.frequency.setValueAtTime(523.25, audioContext.currentTime); // C5
oscillator.frequency.setValueAtTime(659.25, audioContext.currentTime + 0.1); // E5
oscillator.frequency.setValueAtTime(783.99, audioContext.currentTime + 0.2); // G5
} else {
// Gentle encouraging tone
oscillator.frequency.setValueAtTime(440, audioContext.currentTime); // A4
}
gainNode.gain.setValueAtTime(0, audioContext.currentTime);
gainNode.gain.linearRampToValueAtTime(0.1, audioContext.currentTime + 0.01);
gainNode.gain.exponentialRampToValueAtTime(0.01, audioContext.currentTime + 0.3);
oscillator.start(audioContext.currentTime);
oscillator.stop(audioContext.currentTime + 0.3);
} catch (e) {
// Audio not supported, silent fail
}
}
// Keyboard support
document.getElementById('answerInput').addEventListener('keypress', function (e) {
if (e.key === 'Enter') {
if (document.getElementById('nextBtn').style.display === 'none') {
checkAnswer();
} else {
nextCard();
}
}
});
// Initialize the first card when page loads
window.onload = function () {
generateNewCard();
updateScore();
};
// Prevent negative numbers
document.getElementById('answerInput').addEventListener('input', function (e) {
if (e.target.value < 0) {
e.target.value = 0;
}
});
</script>
</body>
</html>

View File

@@ -0,0 +1,431 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Toddler Math Flash Cards - Subtraction Fun!</title>
<link href="https://fonts.googleapis.com/css2?family=Nunito:wght@400;600;700;800&display=swap" rel="stylesheet">
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: 'Nunito', sans-serif;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
min-height: 100vh;
display: flex;
justify-content: center;
align-items: flex-start;
color: #333;
padding: 10px 0;
}
.app-container {
background: white;
border-radius: 20px;
box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1);
padding: 20px;
text-align: center;
max-width: 500px;
width: 95%;
margin: 10px auto;
}
.title {
color: #4a5568;
font-size: 2.2em;
margin-bottom: 15px;
text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.1);
}
.flashcard {
background: linear-gradient(135deg, #43e97b 0%, #38f9d7 100%);
border-radius: 15px;
padding: 25px;
margin: 15px 0;
box-shadow: 0 10px 20px rgba(0, 0, 0, 0.1);
transition: transform 0.3s ease;
}
.flashcard:hover {
transform: scale(1.05);
}
.equation {
font-size: 3.5em;
font-weight: bold;
color: white;
text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.3);
margin-bottom: 10px;
}
.answer-section {
margin: 20px 0;
}
.answer-input {
font-size: 2.5em;
padding: 12px 15px;
border: 4px solid #667eea;
border-radius: 15px;
text-align: center;
width: 130px;
margin: 0 auto 15px auto;
display: block;
font-family: 'Nunito', sans-serif;
}
.answer-input:focus {
outline: none;
border-color: #43e97b;
box-shadow: 0 0 20px rgba(67, 233, 123, 0.3);
}
.buttons {
display: flex;
gap: 15px;
justify-content: center;
margin-top: 20px;
}
.btn {
font-size: 1.5em;
padding: 15px 30px;
border: none;
border-radius: 50px;
cursor: pointer;
font-family: 'Nunito', sans-serif;
font-weight: bold;
transition: all 0.3s ease;
box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2);
}
.btn-check {
background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
color: white;
}
.btn-next {
background: linear-gradient(135deg, #43e97b 0%, #38f9d7 100%);
color: white;
}
.btn:hover {
transform: translateY(-3px);
box-shadow: 0 8px 20px rgba(0, 0, 0, 0.3);
}
.feedback {
font-size: 2.5em;
font-weight: bold;
margin: 20px 0;
padding: 20px;
border-radius: 15px;
transition: all 0.5s ease;
}
.correct {
background: linear-gradient(135deg, #84fab0 0%, #8fd3f4 100%);
color: #2d5a27;
}
.incorrect {
background: linear-gradient(135deg, #ffecd2 0%, #fcb69f 100%);
color: #8b4513;
}
.score {
background: rgba(255, 255, 255, 0.9);
padding: 8px 15px;
border-radius: 20px;
font-size: 1em;
font-weight: bold;
color: #4a5568;
margin-bottom: 15px;
display: inline-block;
}
.celebration {
font-size: 3em;
animation: bounce 1s infinite;
}
@keyframes bounce {
0%,
20%,
50%,
80%,
100% {
transform: translateY(0);
}
40% {
transform: translateY(-30px);
}
60% {
transform: translateY(-15px);
}
}
.number-buttons {
display: grid;
grid-template-columns: repeat(4, 1fr);
gap: 10px;
margin: 15px 0;
}
.number-buttons .number-btn:last-child {
grid-column: span 2;
}
.number-btn {
font-size: 1.8em;
padding: 12px;
border: 3px solid #667eea;
border-radius: 12px;
background: white;
cursor: pointer;
font-family: 'Nunito', sans-serif;
font-weight: bold;
color: #667eea;
transition: all 0.2s ease;
min-height: 50px;
}
.number-btn:hover {
background: #667eea;
color: white;
transform: scale(1.1);
}
@media (max-width: 600px) {
.app-container {
padding: 15px;
margin: 5px auto;
}
.title {
font-size: 1.8em;
margin-bottom: 10px;
}
.equation {
font-size: 2.8em;
}
.answer-input {
font-size: 2.2em;
width: 110px;
}
.number-btn {
font-size: 1.6em;
padding: 10px;
min-height: 45px;
}
.btn {
font-size: 1.3em;
padding: 12px 25px;
}
}
@media (max-height: 700px) {
.flashcard {
padding: 20px;
margin: 10px 0;
}
.equation {
font-size: 3em;
}
.number-btn {
font-size: 1.6em;
padding: 8px;
min-height: 40px;
}
}
</style>
</head>
<body>
<div class="app-container">
<div class="score" id="score">Score: 0/0</div>
<h1 class="title">🎓 Subtraction Practice</h1>
<div class="flashcard">
<div class="equation" id="equation">5 - 3 = ?</div>
</div>
<div class="answer-section">
<input type="number" class="answer-input" id="answerInput" placeholder="?" min="0" max="10">
<div class="number-buttons" id="numberButtons">
<button class="number-btn" onclick="inputNumber(0)">0</button>
<button class="number-btn" onclick="inputNumber(1)">1</button>
<button class="number-btn" onclick="inputNumber(2)">2</button>
<button class="number-btn" onclick="inputNumber(3)">3</button>
<button class="number-btn" onclick="inputNumber(4)">4</button>
<button class="number-btn" onclick="inputNumber(5)">5</button>
<button class="number-btn" onclick="inputNumber(6)">6</button>
<button class="number-btn" onclick="inputNumber(7)">7</button>
<button class="number-btn" onclick="inputNumber(8)">8</button>
<button class="number-btn" onclick="inputNumber(9)">9</button>
<button class="number-btn" onclick="inputNumber(10)">10</button>
<button class="number-btn" onclick="clearAnswer()"
style="background: #ff6b6b; color: white;">Clear</button>
</div>
</div>
<div class="feedback" id="feedback" style="display: none;"></div>
<div class="buttons">
<button class="btn btn-check" onclick="checkAnswer()">Check Answer! 🎯</button>
<button class="btn btn-next" onclick="nextCard()" style="display: none;" id="nextBtn">Next Card! ➡️</button>
</div>
</div>
<script>
let currentNum1, currentNum2, correctAnswer;
let totalQuestions = 0;
let correctAnswers = 0;
// Deal a new subtraction card and reset the UI for a fresh answer.
// Writes module-level state: currentNum1, currentNum2, correctAnswer.
function generateNewCard () {
  // Pick a minuend 0-10, then a subtrahend no larger than it so the
  // difference is never negative (age-appropriate for toddlers).
  currentNum1 = Math.floor(Math.random() * 11); // 0-10
  currentNum2 = Math.floor(Math.random() * (currentNum1 + 1)); // 0 to currentNum1
  correctAnswer = currentNum1 - currentNum2;
  // Update the equation display
  document.getElementById('equation').textContent = `${currentNum1} - ${currentNum2} = ?`;
  // Reset the input and feedback
  document.getElementById('answerInput').value = '';
  document.getElementById('feedback').style.display = 'none';
  document.getElementById('nextBtn').style.display = 'none';
  // Fix: do NOT focus the input here. Focusing pops the on-screen keyboard
  // on mobile; the addition page deliberately avoids it — keep both
  // flashcard pages consistent.
}
function inputNumber (num) {
const input = document.getElementById('answerInput');
const currentValue = input.value;
if (currentValue === '' || currentValue === '0') {
input.value = num;
} else if (currentValue.length < 2) { // Limit to 2 digits max
input.value = currentValue + num;
}
}
function clearAnswer () {
document.getElementById('answerInput').value = '';
}
function checkAnswer () {
const userAnswer = parseInt(document.getElementById('answerInput').value);
const feedbackEl = document.getElementById('feedback');
if (isNaN(userAnswer)) {
alert('Please enter a number! 😊');
return;
}
totalQuestions++;
if (userAnswer === correctAnswer) {
correctAnswers++;
feedbackEl.innerHTML = `<div class="celebration">🎉</div>Awesome! ${currentNum1} - ${currentNum2} = ${correctAnswer}`;
feedbackEl.className = 'feedback correct';
// Play success sound (if browser supports it)
playSound('success');
} else {
feedbackEl.innerHTML = `<div>Try again! 💪</div>${currentNum1} - ${currentNum2} = ${correctAnswer}`;
feedbackEl.className = 'feedback incorrect';
// Play try again sound (if browser supports it)
playSound('tryAgain');
}
feedbackEl.style.display = 'block';
document.getElementById('nextBtn').style.display = 'inline-block';
updateScore();
}
function nextCard () {
generateNewCard();
}
function updateScore () {
const percentage = totalQuestions > 0 ? Math.round((correctAnswers / totalQuestions) * 100) : 0;
document.getElementById('score').textContent = `Score: ${correctAnswers}/${totalQuestions} (${percentage}%)`;
}
function playSound (type) {
// Create audio context for simple beep sounds
try {
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
const oscillator = audioContext.createOscillator();
const gainNode = audioContext.createGain();
oscillator.connect(gainNode);
gainNode.connect(audioContext.destination);
if (type === 'success') {
// Happy ascending notes
oscillator.frequency.setValueAtTime(523.25, audioContext.currentTime); // C5
oscillator.frequency.setValueAtTime(659.25, audioContext.currentTime + 0.1); // E5
oscillator.frequency.setValueAtTime(783.99, audioContext.currentTime + 0.2); // G5
} else {
// Gentle encouraging tone
oscillator.frequency.setValueAtTime(440, audioContext.currentTime); // A4
}
gainNode.gain.setValueAtTime(0, audioContext.currentTime);
gainNode.gain.linearRampToValueAtTime(0.1, audioContext.currentTime + 0.01);
gainNode.gain.exponentialRampToValueAtTime(0.01, audioContext.currentTime + 0.3);
oscillator.start(audioContext.currentTime);
oscillator.stop(audioContext.currentTime + 0.3);
} catch (e) {
// Audio not supported, silent fail
}
}
// Keyboard support
document.getElementById('answerInput').addEventListener('keypress', function (e) {
if (e.key === 'Enter') {
if (document.getElementById('nextBtn').style.display === 'none') {
checkAnswer();
} else {
nextCard();
}
}
});
// Initialize the first card when page loads
window.onload = function () {
generateNewCard();
updateScore();
};
// Prevent negative numbers
document.getElementById('answerInput').addEventListener('input', function (e) {
if (e.target.value < 0) {
e.target.value = 0;
}
});
</script>
</body>
</html>

222
margotwood/index.html Normal file
View File

@@ -0,0 +1,222 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Learning Fun for Toddlers!</title>
<link href="https://fonts.googleapis.com/css2?family=Nunito:wght@400;600;700;800&display=swap" rel="stylesheet">
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: 'Nunito', sans-serif;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
min-height: 100vh;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
padding: 20px;
color: #333;
}
.container {
background: white;
border-radius: 30px;
box-shadow: 0 20px 40px rgba(0, 0, 0, 0.2);
padding: 40px 30px;
text-align: center;
max-width: 600px;
width: 100%;
margin: 20px;
}
.title {
color: #4a5568;
font-size: 3em;
margin-bottom: 20px;
text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.1);
line-height: 1.2;
}
.subtitle {
color: #667eea;
font-size: 1.5em;
margin-bottom: 40px;
font-weight: normal;
}
.app-link {
display: block;
background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
color: white;
text-decoration: none;
font-size: 2.5em;
font-weight: bold;
padding: 40px 30px;
border-radius: 25px;
margin: 30px 0;
box-shadow: 0 15px 30px rgba(0, 0, 0, 0.2);
transition: all 0.3s ease;
text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.3);
min-height: 120px;
display: flex;
align-items: center;
justify-content: center;
flex-direction: column;
gap: 10px;
}
.app-link:hover,
.app-link:active {
transform: scale(1.05);
box-shadow: 0 20px 40px rgba(0, 0, 0, 0.3);
background: linear-gradient(135deg, #f5576c 0%, #f093fb 100%);
}
.app-link:active {
transform: scale(0.98);
}
.emoji {
font-size: 1.5em;
margin-bottom: 10px;
}
.link-text {
font-size: 0.8em;
line-height: 1.2;
}
.footer {
margin-top: 30px;
color: #667eea;
font-size: 1.2em;
}
/* Mobile-first responsive design */
@media (max-width: 480px) {
.container {
padding: 30px 20px;
margin: 10px;
}
.title {
font-size: 2.5em;
}
.subtitle {
font-size: 1.3em;
}
.app-link {
font-size: 2.2em;
padding: 35px 25px;
min-height: 100px;
}
}
@media (max-width: 360px) {
.title {
font-size: 2.2em;
}
.app-link {
font-size: 2em;
padding: 30px 20px;
}
}
/* Tablet optimizations */
@media (min-width: 481px) and (max-width: 768px) {
.container {
padding: 50px 40px;
}
.title {
font-size: 3.5em;
}
.app-link {
font-size: 3em;
padding: 50px 40px;
min-height: 140px;
}
}
/* Large tablet/desktop */
@media (min-width: 769px) {
.container {
padding: 60px 50px;
}
.title {
font-size: 4em;
}
.app-link {
font-size: 3.5em;
padding: 60px 50px;
min-height: 160px;
}
}
/* Touch-friendly adjustments */
@media (pointer: coarse) {
.app-link {
min-height: 120px;
padding: 40px;
}
}
/* Subtraction-specific styling */
.app-link.subtraction {
background: linear-gradient(135deg, #43e97b 0%, #38f9d7 100%);
}
.app-link.subtraction:hover,
.app-link.subtraction:active {
background: linear-gradient(135deg, #38f9d7 0%, #43e97b 100%);
}
/* Skip counting-specific styling */
.app-link.skip-counting {
background: linear-gradient(135deg, #ff9a9e 0%, #fecfef 100%);
}
.app-link.skip-counting:hover,
.app-link.skip-counting:active {
background: linear-gradient(135deg, #fecfef 0%, #ff9a9e 100%);
}
</style>
</head>
<body>
<div class="container">
<h1 class="title">🎓 Learning Time! 🎓</h1>
<p class="subtitle">Tap to start learning math!</p>
<a href="flashcards/addition.html" class="app-link">
<div class="emoji">➕</div>
<div class="link-text">Addition</div>
</a>
<a href="flashcards/subtraction.html" class="app-link subtraction">
<div class="emoji">➖</div>
<div class="link-text">Subtraction</div>
</a>
<a href="skipcount/twos.html" class="app-link skip-counting">
<div class="emoji">⏭️ 2</div>
<div class="link-text">Skip Counting by 2s</div>
</a>
<div class="footer">
<p>Fun math games for little learners! 🌟</p>
</div>
</div>
</body>
</html>

View File

@@ -0,0 +1,471 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Toddler Math Flash Cards - Skip Counting by 2s!</title>
<link href="https://fonts.googleapis.com/css2?family=Nunito:wght@400;600;700;800&display=swap" rel="stylesheet">
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: 'Nunito', sans-serif;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
min-height: 100vh;
display: flex;
justify-content: center;
align-items: flex-start;
color: #333;
padding: 10px 0;
}
.app-container {
background: white;
border-radius: 20px;
box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1);
padding: 20px;
text-align: center;
max-width: 500px;
width: 95%;
margin: 10px auto;
}
.title {
color: #4a5568;
font-size: 2.2em;
margin-bottom: 15px;
text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.1);
}
.flashcard {
background: linear-gradient(135deg, #ff9a9e 0%, #fecfef 100%);
border-radius: 15px;
padding: 25px;
margin: 15px 0;
box-shadow: 0 10px 20px rgba(0, 0, 0, 0.1);
transition: transform 0.3s ease;
}
.flashcard:hover {
transform: scale(1.05);
}
.equation {
font-size: 3.5em;
font-weight: bold;
color: white;
text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.3);
margin-bottom: 10px;
}
.sequence-display {
font-size: 2.2em;
font-weight: bold;
color: white;
text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.3);
margin-bottom: 15px;
line-height: 1.2;
}
.answer-section {
margin: 20px 0;
}
.answer-input {
font-size: 2.5em;
padding: 12px 15px;
border: 4px solid #667eea;
border-radius: 15px;
text-align: center;
width: 130px;
margin: 0 auto 15px auto;
display: block;
font-family: 'Nunito', sans-serif;
}
.answer-input:focus {
outline: none;
border-color: #ff9a9e;
box-shadow: 0 0 20px rgba(255, 154, 158, 0.3);
}
.buttons {
display: flex;
gap: 15px;
justify-content: center;
margin-top: 20px;
}
.btn {
font-size: 1.5em;
padding: 15px 30px;
border: none;
border-radius: 50px;
cursor: pointer;
font-family: 'Nunito', sans-serif;
font-weight: bold;
transition: all 0.3s ease;
box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2);
}
.btn-check {
background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
color: white;
}
.btn-next {
background: linear-gradient(135deg, #43e97b 0%, #38f9d7 100%);
color: white;
}
.btn:hover {
transform: translateY(-3px);
box-shadow: 0 8px 20px rgba(0, 0, 0, 0.3);
}
.feedback {
font-size: 2.5em;
font-weight: bold;
margin: 20px 0;
padding: 20px;
border-radius: 15px;
transition: all 0.5s ease;
}
.correct {
background: linear-gradient(135deg, #84fab0 0%, #8fd3f4 100%);
color: #2d5a27;
}
.incorrect {
background: linear-gradient(135deg, #ffecd2 0%, #fcb69f 100%);
color: #8b4513;
}
.score {
background: rgba(255, 255, 255, 0.9);
padding: 8px 15px;
border-radius: 20px;
font-size: 1em;
font-weight: bold;
color: #4a5568;
margin-bottom: 15px;
display: inline-block;
}
.celebration {
font-size: 3em;
animation: bounce 1s infinite;
}
@keyframes bounce {
0%,
20%,
50%,
80%,
100% {
transform: translateY(0);
}
40% {
transform: translateY(-30px);
}
60% {
transform: translateY(-15px);
}
}
.number-buttons {
display: grid;
grid-template-columns: repeat(4, 1fr);
gap: 10px;
margin: 15px 0;
}
.number-buttons .number-btn:last-child {
grid-column: span 2;
}
.number-btn {
font-size: 1.8em;
padding: 12px;
border: 3px solid #667eea;
border-radius: 12px;
background: white;
cursor: pointer;
font-family: 'Nunito', sans-serif;
font-weight: bold;
color: #667eea;
transition: all 0.2s ease;
min-height: 50px;
}
.number-btn:hover {
background: #667eea;
color: white;
transform: scale(1.1);
}
.hint {
font-size: 1.2em;
color: #666;
margin-bottom: 10px;
font-style: italic;
}
@media (max-width: 600px) {
.app-container {
padding: 15px;
margin: 5px auto;
}
.title {
font-size: 1.8em;
margin-bottom: 10px;
}
.equation {
font-size: 2.8em;
}
.sequence-display {
font-size: 1.8em;
}
.answer-input {
font-size: 2.2em;
width: 110px;
}
.number-btn {
font-size: 1.6em;
padding: 10px;
min-height: 45px;
}
.btn {
font-size: 1.3em;
padding: 12px 25px;
}
}
@media (max-height: 700px) {
.flashcard {
padding: 20px;
margin: 10px 0;
}
.equation {
font-size: 3em;
}
.sequence-display {
font-size: 2em;
}
.number-btn {
font-size: 1.6em;
padding: 8px;
min-height: 40px;
}
}
</style>
</head>
<body>
<div class="app-container">
<div class="score" id="score">Score: 0/0</div>
<h1 class="title">⏭️ Skip Counting by 2s</h1>
<div class="flashcard">
<div class="hint">What comes next?</div>
<div class="sequence-display" id="sequenceDisplay">2, 4, 6, ?</div>
</div>
<div class="answer-section">
<input type="number" class="answer-input" id="answerInput" placeholder="?" min="0" max="100">
<div class="number-buttons" id="numberButtons">
<button class="number-btn" onclick="inputNumber(0)">0</button>
<button class="number-btn" onclick="inputNumber(1)">1</button>
<button class="number-btn" onclick="inputNumber(2)">2</button>
<button class="number-btn" onclick="inputNumber(3)">3</button>
<button class="number-btn" onclick="inputNumber(4)">4</button>
<button class="number-btn" onclick="inputNumber(5)">5</button>
<button class="number-btn" onclick="inputNumber(6)">6</button>
<button class="number-btn" onclick="inputNumber(7)">7</button>
<button class="number-btn" onclick="inputNumber(8)">8</button>
<button class="number-btn" onclick="inputNumber(9)">9</button>
<button class="number-btn" onclick="inputNumber(10)">10</button>
<button class="number-btn" onclick="clearAnswer()"
style="background: #ff6b6b; color: white;">Clear</button>
</div>
</div>
<div class="feedback" id="feedback" style="display: none;"></div>
<div class="buttons">
<button class="btn btn-check" onclick="checkAnswer()">Check Answer! 🎯</button>
<button class="btn btn-next" onclick="nextCard()" style="display: none;" id="nextBtn">Next Card! ➡️</button>
</div>
</div>
<script>
let currentSequence = [];
let correctAnswer;
let totalQuestions = 0;
let correctAnswers = 0;
let sequenceLength = 3; // Start with 3 numbers shown, ask for 4th
function generateNewCard() {
// Generate a random starting point for skip counting by 2s
// Start with even numbers from 0 to 10, then continue the sequence
const startNumber = Math.floor(Math.random() * 6) * 2; // 0, 2, 4, 6, 8, 10
// Create sequence of specified length
currentSequence = [];
for (let i = 0; i < sequenceLength; i++) {
currentSequence.push(startNumber + (i * 2));
}
// The correct answer is the next number in the sequence
correctAnswer = startNumber + (sequenceLength * 2);
// Display the sequence with a question mark for the next number
const sequenceDisplay = currentSequence.join(', ') + ', ?';
document.getElementById('sequenceDisplay').textContent = sequenceDisplay;
// Reset the input and feedback
document.getElementById('answerInput').value = '';
document.getElementById('feedback').style.display = 'none';
document.getElementById('nextBtn').style.display = 'none';
// Don't focus on input to prevent keyboard from showing on mobile
}
function inputNumber(num) {
const input = document.getElementById('answerInput');
const currentValue = input.value;
if (currentValue === '' || currentValue === '0') {
input.value = num;
} else if (currentValue.length < 3) { // Limit to 3 digits max for larger numbers
input.value = currentValue + num;
}
}
function clearAnswer() {
document.getElementById('answerInput').value = '';
}
function checkAnswer() {
const userAnswer = parseInt(document.getElementById('answerInput').value);
const feedbackEl = document.getElementById('feedback');
if (isNaN(userAnswer)) {
alert('Please enter a number! 😊');
return;
}
totalQuestions++;
if (userAnswer === correctAnswer) {
correctAnswers++;
feedbackEl.innerHTML = `<div class="celebration">🎉</div>Perfect! The sequence is: ${currentSequence.join(', ')}, ${correctAnswer}`;
feedbackEl.className = 'feedback correct';
// Play success sound (if browser supports it)
playSound('success');
// Gradually increase difficulty by showing longer sequences
if (correctAnswers % 5 === 0 && sequenceLength < 5) {
sequenceLength++;
}
} else {
feedbackEl.innerHTML = `<div>Try again! 💪</div>The sequence is: ${currentSequence.join(', ')}, ${correctAnswer}`;
feedbackEl.className = 'feedback incorrect';
// Play try again sound (if browser supports it)
playSound('tryAgain');
}
feedbackEl.style.display = 'block';
document.getElementById('nextBtn').style.display = 'inline-block';
updateScore();
}
function nextCard() {
generateNewCard();
}
function updateScore() {
const percentage = totalQuestions > 0 ? Math.round((correctAnswers / totalQuestions) * 100) : 0;
document.getElementById('score').textContent = `Score: ${correctAnswers}/${totalQuestions} (${percentage}%)`;
}
// Play a short feedback tone via the Web Audio API.
// 'success' plays an ascending C5→E5→G5 arpeggio; anything else plays a
// single gentle A4. Fails silently when the browser has no audio support.
function playSound(type) {
  try {
    const AudioCtx = window.AudioContext || window.webkitAudioContext;
    const ctx = new AudioCtx();
    const osc = ctx.createOscillator();
    const gain = ctx.createGain();
    const now = ctx.currentTime;

    osc.connect(gain);
    gain.connect(ctx.destination);

    if (type === 'success') {
      // Happy ascending notes: C5, E5, G5 at 100 ms steps.
      osc.frequency.setValueAtTime(523.25, now);
      osc.frequency.setValueAtTime(659.25, now + 0.1);
      osc.frequency.setValueAtTime(783.99, now + 0.2);
    } else {
      // Gentle encouraging tone: A4.
      osc.frequency.setValueAtTime(440, now);
    }

    // Quick fade-in then exponential fade-out to avoid clicks.
    gain.gain.setValueAtTime(0, now);
    gain.gain.linearRampToValueAtTime(0.1, now + 0.01);
    gain.gain.exponentialRampToValueAtTime(0.01, now + 0.3);

    osc.start(now);
    osc.stop(now + 0.3);
  } catch (e) {
    // Audio not supported; fail silently.
  }
}
// Keyboard support
// Enter submits the current answer; once the Next button is visible
// (answer already checked), Enter advances to the next card instead.
document.getElementById('answerInput').addEventListener('keypress', function (e) {
if (e.key === 'Enter') {
if (document.getElementById('nextBtn').style.display === 'none') {
checkAnswer();
} else {
nextCard();
}
}
});
// Initialize the first card when page loads
window.onload = function () {
generateNewCard();
updateScore();
};
// Prevent negative numbers
// Clamps the numeric input field back to 0 whenever a negative value appears.
document.getElementById('answerInput').addEventListener('input', function (e) {
if (e.target.value < 0) {
e.target.value = 0;
}
});
</script>
</body>
</html>

0
margotwood/style.css Normal file
View File

4
media/.env.example Normal file
View File

@@ -0,0 +1,4 @@
# WireGuard Configuration
WIREGUARD_PRIVATE_KEY=
WIREGUARD_PRESHARED_KEY=
WIREGUARD_ADDRESSES=

View File

@@ -1,4 +1,3 @@
version: "3"
services: services:
gluetun: gluetun:
image: qmcgaw/gluetun:latest image: qmcgaw/gluetun:latest
@@ -13,32 +12,193 @@ services:
- 8388:8388/tcp # Shadowsocks - 8388:8388/tcp # Shadowsocks
- 8388:8388/udp # Shadowsocks - 8388:8388/udp # Shadowsocks
- 8080:8080/tcp # sabnzbd webUI - 8080:8080/tcp # sabnzbd webUI
- 8085:8085/tcp # sabnzbd webUI
volumes: volumes:
- /vpn-gluetun:/gluetun - gluetun_data:/gluetun
environment: environment:
# See https://github.com/qdm12/gluetun/wiki # See https://github.com/qdm12/gluetun/wiki
- VPN_SERVICE_PROVIDER=protonvpn - VPN_SERVICE_PROVIDER=airvpn
# Wireguard: - VPN_TYPE=wireguard
- OPENVPN_USER=0O1JuJFnG4GOFauGOc572pR0 - WIREGUARD_PRIVATE_KEY=${WIREGUARD_PRIVATE_KEY}
- OPENVPN_PASSWORD=8HOCqw4zvv8mbX4bLAUpLT3z - WIREGUARD_PRESHARED_KEY=${WIREGUARD_PRESHARED_KEY}
- WIREGUARD_ADDRESSES=${WIREGUARD_ADDRESSES}
- UPDATER_PERIOD:24h
- TZ=America/New_York # Timezone for accurate log times - TZ=America/New_York # Timezone for accurate log times
# optional, server_[countries, cities] - SERVER_COUNTRIES=United States
# - SERVER_COUNTRIES: Comma separated list of countries
# - SERVER_CITIES=Stockholm
# - SERVER_HOSTNAMES: Comma separated list of server hostnames
restart: always restart: always
labels:
- diun.enable=true
sabnzbd: sabnzbd:
image: lscr.io/linuxserver/sabnzbd:latest image: lscr.io/linuxserver/sabnzbd:latest
container_name: sabnzbd container_name: sabnzbd
environment: environment:
- PUID=1000 - PUID=1000
- PGID=1000 - PGID=1000
- UMASK=022
- TZ=America/New_York - TZ=America/New_York
volumes: volumes:
- /mnt/d/docker/sabnzbd/config:/config - sabnzbd_data:/config
- /mnt/d/media/downloads/sabnzbd/complete:/downloads - /data/usenet/downloads:/downloads
- /mnt/d/media/downloads/sabnzbd/incomplete:/incomplete-downloads - /data/usenet/incomplete-downloads:/incomplete-downloads
# network_mode: "service:gluetun" forces sabnzbd to connect to the internet through the VPN defined in the gluetun container above # network_mode: "service:gluetun" forces sabnzbd to connect to the internet through the VPN defined in the gluetun container above
network_mode: "service:gluetun" network_mode: service:gluetun
depends_on:
gluetun:
condition: service_healthy
restart: always restart: always
labels:
- diun.enable=true
sonarr:
image: lscr.io/linuxserver/sonarr:latest
container_name: sonarr
environment:
- PUID=1000
- PGID=1000
- UMASK=022
- TZ=America/New_York
volumes:
- /docker/config/sonarr:/config
- /mnt/share/media/tv:/tv
- /mnt/share/media/anime:/anime
- /mnt/share/media/babies:/babies
- /data/usenet/downloads:/downloads
ports:
- 8989:8989
restart: always
labels:
- diun.enable=true
radarr:
image: lscr.io/linuxserver/radarr:latest
container_name: radarr
environment:
- PUID=1000
- PGID=1000
- UMASK=022
- TZ=America/New_York
volumes:
- /docker/config/radarr:/config
- /mnt/share/media:/data
- /mnt/share/media/movies:/movies
- /mnt/share/media/movies_kids:/movies_kids
- /data/usenet/downloads:/downloads
ports:
- 7878:7878
restart: always
prowlarr:
image: lscr.io/linuxserver/prowlarr:latest
container_name: prowlarr
environment:
- PUID=1000
- PGID=1000
- TZ=America/New_York
volumes:
- /docker/config/prowlarr:/config
ports:
- 9696:9696
restart: always
labels:
- diun.enable=true
jellyseerr:
image: fallenbagel/jellyseerr:latest
container_name: jellyseerr
environment:
- LOG_LEVEL=debug
- TZ=America/New_York
ports:
- 5055:5055
volumes:
- /docker/config/jellyseerr/:/app/config
restart: unless-stopped
labels:
- diun.enable=true
tautulli:
image: lscr.io/linuxserver/tautulli:latest
container_name: tautulli
environment:
- PUID=1000
- PGID=1000
- TZ=America/New_York
volumes:
- tautulli:/config
ports:
- 8181:8181
restart: always
labels:
- diun.enable=true
audiobookshelf:
image: ghcr.io/advplyr/audiobookshelf:latest
container_name: audiobookshelf
ports:
- 13378:80
volumes:
- /mnt/share/media/audiobooks:/audiobooks
- /mnt/share/media/books:/books
- /mnt/share/media/podcasts:/podcasts
- /docker/config/audiobookshelf:/config
- ./audiobookshelf/metadata:/metadata
environment:
- TZ=America/New_York
restart: always
labels:
- diun.enable=true
huntarr:
image: huntarr/huntarr:latest
container_name: huntarr
restart: always
ports:
- 9705:9705
volumes:
- huntarr_data:/config
environment:
- TZ=America/New_York
labels:
- diun.enable=true
jellystat-db:
image: postgres:15.2
shm_size: 1gb
container_name: jellystat-db
restart: unless-stopped
logging:
driver: json-file
options:
max-file: "5"
max-size: 10m
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: ${JELLYSTAT_POSTGRES_PASSWORD}
labels:
- diun.enable=true
volumes:
- postgres_data:/var/lib/postgresql/data
jellystat:
image: cyfershepard/jellystat:latest
container_name: jellystat
restart: unless-stopped
logging:
driver: json-file
options:
max-file: "5"
max-size: 10m
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: ${JELLYSTAT_POSTGRES_PASSWORD}
POSTGRES_IP: jellystat-db
POSTGRES_PORT: 5432
JWT_SECRET: ${JELLYSTAT_JWT_SECRET}
TZ: America/New_York
labels:
- diun.enable=true
volumes:
- jellystat-backup-data:/app/backend/backup-data
ports:
- 3200:3000
depends_on:
- jellystat-db
networks:
default: null
volumes:
gluetun_data: null
sabnzbd_data: null
tautulli: null
huntarr_data: null
postgres_data: null
jellystat-backup-data: null

17
metube/docker-compose.yml Normal file
View File

@@ -0,0 +1,17 @@
services:
  metube:
    image: ghcr.io/alexta69/metube:latest
    container_name: metube
    environment:
      - UID=1000
      - GID=1000
      - TZ=America/New_York
      - 'YTDL_OPTIONS={"writesubtitles": "true"}'
      - OUTPUT_TEMPLATE=%(timestamp>%Y-%m-%d-%H-%M-%S)S
    restart: unless-stopped
    ports:
      # Quoted so the host:container mapping always parses as a string.
      - "7081:8081"
    volumes:
      - /mnt/share/media/metube:/downloads
    labels:
      - diun.enable=true

5
miningwood/Caddyfile Normal file
View File

@@ -0,0 +1,5 @@
:80 {
root * /usr/share/caddy
encode gzip
file_server
}

View File

@@ -0,0 +1,16 @@
services:
  caddy:
    image: caddy:2-alpine
    restart: unless-stopped
    ports:
      # Quoted so host:container mappings always parse as strings.
      - "8075:80"
      - "8043:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - ./:/usr/share/caddy
      - caddy_data:/data
      - caddy_config:/config

volumes:
  caddy_data: null
  caddy_config: null

networks: {}

140
miningwood/index.html Normal file
View File

@@ -0,0 +1,140 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Mining Wood - Precision. Profit. Power.</title>
<script src="https://cdn.tailwindcss.com"></script>
<link href="https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;600;700&display=swap" rel="stylesheet">
<style>
body {
font-family: 'Poppins', sans-serif;
}
</style>
</head>
<body class="bg-white text-gray-800">
<header class="bg-white shadow-md sticky top-0 z-50">
<div class="container mx-auto px-4 py-3 flex justify-between items-center">
<a href="https://miningwood.com" class="flex items-center hover:opacity-80 transition-opacity">
<img src="miningwood-logo.png" alt="Mining Wood Logo" class="h-20 mr-3" />
<h1 class="text-2xl font-bold text-blue-800">Mining Wood</h1>
</a>
<nav class="hidden md:flex items-center space-x-6">
<a href="#trading" class="text-gray-600 hover:text-blue-600 font-medium">Day Trading</a>
<a href="#software" class="text-gray-600 hover:text-blue-600 font-medium">Software</a>
<a href="#about" class="text-gray-600 hover:text-blue-600 font-medium">About</a>
<a href="#contact" class="text-gray-600 hover:text-blue-600 font-medium">Contact</a>
</nav>
</div>
</header>
<main>
<section class="bg-gray-800 text-white text-center py-20">
<div class="container mx-auto px-4">
<h2 class="text-cyan-400 text-sm font-bold uppercase tracking-widest">Precision. Profit. Power.</h2>
<h1 class="text-4xl md:text-6xl font-bold mt-2 mb-4">Combining Elite Day Trading with Cutting-Edge Algorithmic
Software.</h1>
<p class="text-lg max-w-2xl mx-auto mb-8 opacity-90">Mining Wood is an exclusive firm operating at the
intersection of high-frequency finance and bespoke technology. We don't just trade; we build the tools that
master the market.</p>
<div class="flex justify-center items-center space-x-4">
<a href="#software"
class="bg-cyan-400 text-gray-900 font-semibold px-8 py-3 rounded-md hover:bg-cyan-500 transition">Explore
Our Software</a>
<a href="#"
class="bg-transparent border-2 border-blue-500 text-blue-500 font-semibold px-8 py-3 rounded-md hover:bg-blue-500 hover:text-white transition">See
Trading Results</a>
</div>
</div>
</section>
<section class="py-16 bg-gray-50" id="trading">
<div class="container mx-auto px-4 text-center">
<h2 class="text-3xl font-bold mb-2">Our Dual Expertise</h2>
<p class="text-gray-600 max-w-3xl mx-auto mb-12">We are unified by a single purpose: to find and exploit
inefficiencies in the market
through superior technology and rigorous analysis.</p>
<div class="grid md:grid-cols-2 gap-8 text-left">
<div class="bg-white p-8 rounded-lg shadow-md">
<span class="text-3xl mb-4 block">📈</span>
<h3 class="text-xl font-bold text-blue-600 mb-2">Day Trading</h3>
<h4 class="font-semibold text-gray-800 mb-3">Data-Driven Execution</h4>
<p class="text-gray-600">Our team of expert traders utilizes quantitative models and real-time data analysis
to execute high-conviction trades across various asset classes, focusing on <strong
class="font-semibold">high-probability setups</strong> and <strong class="font-semibold">risk
management</strong>.</p>
</div>
<div class="bg-white p-8 rounded-lg shadow-md">
<span class="text-3xl mb-4 block">💻</span>
<h3 class="text-xl font-bold text-blue-600 mb-2">Software Development</h3>
<h4 class="font-semibold text-gray-800 mb-3">Proprietary Trading Systems</h4>
<p class="text-gray-600">We design, develop, and maintain a suite of proprietary software
solutions—including backtesting engines, execution platforms, and sophisticated <strong
class="font-semibold">machine learning</strong> models—to give our traders an unmatched edge.
</p>
</div>
</div>
</div>
</section>
<section class="py-16" id="software">
<div class="container mx-auto px-4 text-center">
<h2 class="text-3xl font-bold mb-2">Built to Outperform</h2>
<p class="text-gray-600 max-w-3xl mx-auto mb-12">At the heart of Mining Wood is our technology division. We
specialize in creating
high-speed, reliable tools essential for modern finance.</p>
<div class="grid md:grid-cols-3 gap-8 text-left">
<div class="border border-gray-200 p-6 rounded-lg hover:shadow-lg hover:-translate-y-1 transition-transform">
<h4 class="font-bold text-blue-800 border-b-2 border-cyan-400 inline-block pb-1 mb-4">The 'Vein'
Backtester</h4>
<p class="text-gray-600">Run complex, multi-variable strategies against decades of historical data in
seconds. Optimize parameters
for peak performance before deployment.</p>
</div>
<div class="border border-gray-200 p-6 rounded-lg hover:shadow-lg hover:-translate-y-1 transition-transform">
<h4 class="font-bold text-blue-800 border-b-2 border-cyan-400 inline-block pb-1 mb-4">High-Frequency APIs
</h4>
<p class="text-gray-600">Direct, low-latency connections to major exchanges. Guaranteed speed and
reliability for execution when
every millisecond counts.</p>
</div>
<div class="border border-gray-200 p-6 rounded-lg hover:shadow-lg hover:-translate-y-1 transition-transform">
<h4 class="font-bold text-blue-800 border-b-2 border-cyan-400 inline-block pb-1 mb-4">Risk Monitoring
Dashboard</h4>
<p class="text-gray-600">Instantly visualize real-time exposure, drawdown, and portfolio health across all
active strategies.
Proactive alerting for defined thresholds.</p>
</div>
</div>
</div>
</section>
</main>
<footer class="bg-blue-800 text-white">
<div class="container mx-auto px-4 pt-12 pb-8">
<div class="grid md:grid-cols-3 gap-8 border-b border-blue-700 pb-8">
<div class="md:col-span-1">
<h4 class="text-cyan-400 font-bold mb-3">Mining Wood</h4>
<p class="text-sm">Jacksonville, Florida</p>
</div>
<div>
<h4 class="text-cyan-400 font-bold mb-3">Connect</h4>
<p class="text-sm">Email: <a href="mailto:contact@miningwood.com"
class="underline hover:text-cyan-300">contact
[at] miningwood.com</a></p>
</div>
</div>
<div class="text-center text-sm pt-6">
<p>&copy; 2025 Mining Wood. All Rights Reserved.</p>
</div>
</div>
</footer>
</body>
</html>

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 MiB

0
miningwood/style.css Normal file
View File

View File

@@ -0,0 +1,14 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<script src="https://gist.ptrwd.com/acedanger/stock-entry.js"></script>
</body>
</html>

View File

@@ -0,0 +1,327 @@
import warnings
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
warnings.filterwarnings('ignore')
class TechnicalAnalyzer:
    """Compute standard technical indicators over OHLCV price data and
    derive simple rule-based entry/exit signals from them."""

    def __init__(self, data):
        """
        Initialize with price data DataFrame.

        Expected columns: ['date', 'open', 'high', 'low', 'close', 'volume'].
        The input is copied so the caller's DataFrame is never mutated.
        """
        self.data = data.copy()
        self.signals = pd.DataFrame()

    def calculate_sma(self, period):
        """Simple Moving Average of 'close' over `period` rows."""
        return self.data['close'].rolling(window=period).mean()

    def calculate_ema(self, period):
        """Exponential Moving Average of 'close' with span=`period`."""
        return self.data['close'].ewm(span=period).mean()

    def calculate_rsi(self, period=14):
        """Relative Strength Index (0-100) from `period`-row mean gains/losses.

        When the rolling loss is 0, rs is +inf and RSI evaluates to 100 —
        the conventional limit for an all-gain window.
        """
        delta = self.data['close'].diff()
        gain = (delta.where(delta > 0, 0)).rolling(window=period).mean()
        loss = (-delta.where(delta < 0, 0)).rolling(window=period).mean()
        rs = gain / loss
        rsi = 100 - (100 / (1 + rs))
        return rsi

    def calculate_macd(self, fast=12, slow=26, signal=9):
        """MACD indicator: returns (macd_line, signal_line, histogram)."""
        ema_fast = self.calculate_ema(fast)
        ema_slow = self.calculate_ema(slow)
        macd_line = ema_fast - ema_slow
        signal_line = macd_line.ewm(span=signal).mean()
        histogram = macd_line - signal_line
        return macd_line, signal_line, histogram

    def calculate_bollinger_bands(self, period=20, std_dev=2):
        """Bollinger Bands: returns (upper_band, middle SMA, lower_band)."""
        sma = self.calculate_sma(period)
        std = self.data['close'].rolling(window=period).std()
        upper_band = sma + (std * std_dev)
        lower_band = sma - (std * std_dev)
        return upper_band, sma, lower_band

    def calculate_atr(self, period=14):
        """Average True Range: rolling mean of the true range over `period` rows."""
        high_low = self.data['high'] - self.data['low']
        high_close = np.abs(self.data['high'] - self.data['close'].shift())
        low_close = np.abs(self.data['low'] - self.data['close'].shift())
        ranges = pd.concat([high_low, high_close, low_close], axis=1)
        # Row-wise max; NaNs from the first shifted row are skipped by pandas.
        true_range = ranges.max(axis=1)
        atr = true_range.rolling(window=period).mean()
        return atr

    def calculate_volume_indicators(self):
        """Volume-based indicators: returns (volume ratio vs 20-row SMA, OBV)."""
        # Volume Moving Average
        vol_sma_20 = self.data['volume'].rolling(window=20).mean()
        vol_ratio = self.data['volume'] / vol_sma_20
        # On Balance Volume (OBV): cumulative volume signed by close direction.
        obv = (np.sign(self.data['close'].diff()) *
               self.data['volume']).fillna(0).cumsum()
        return vol_ratio, obv

    def generate_all_indicators(self):
        """Attach every indicator as a column on self.data and return it."""
        # Moving Averages
        self.data['sma_20'] = self.calculate_sma(20)
        self.data['sma_50'] = self.calculate_sma(50)
        self.data['ema_12'] = self.calculate_ema(12)
        self.data['ema_26'] = self.calculate_ema(26)
        # RSI
        self.data['rsi'] = self.calculate_rsi()
        # MACD
        macd, signal, histogram = self.calculate_macd()
        self.data['macd'] = macd
        self.data['macd_signal'] = signal
        self.data['macd_histogram'] = histogram
        # Bollinger Bands
        bb_upper, bb_middle, bb_lower = self.calculate_bollinger_bands()
        self.data['bb_upper'] = bb_upper
        self.data['bb_middle'] = bb_middle
        self.data['bb_lower'] = bb_lower
        # ATR
        self.data['atr'] = self.calculate_atr()
        # Volume indicators
        vol_ratio, obv = self.calculate_volume_indicators()
        self.data['vol_ratio'] = vol_ratio
        self.data['obv'] = obv
        return self.data

    def identify_entry_signals(self):
        """Score each row against bullish conditions; rows scoring >= 3 become
        ENTRY signal dicts with date, price, score, and the matched reasons.
        Assumes generate_all_indicators() has already been run."""
        signals = []
        for i in range(1, len(self.data)):
            entry_score = 0
            reasons = []
            current = self.data.iloc[i]
            previous = self.data.iloc[i - 1]
            # Moving Average Crossover (Golden Cross)
            if (current['sma_20'] > current['sma_50'] and
                    previous['sma_20'] <= previous['sma_50']):
                entry_score += 2
                reasons.append("SMA Golden Cross")
            # Price above both MAs
            if current['close'] > current['sma_20'] > current['sma_50']:
                entry_score += 1
                reasons.append("Price above MAs")
            # RSI oversold recovery
            if previous['rsi'] < 30 and current['rsi'] > 30:
                entry_score += 2
                reasons.append("RSI oversold recovery")
            # MACD bullish crossover
            if (current['macd'] > current['macd_signal'] and
                    previous['macd'] <= previous['macd_signal']):
                entry_score += 2
                reasons.append("MACD bullish crossover")
            # Bollinger Band bounce
            if previous['close'] <= previous['bb_lower'] and current['close'] > previous['bb_lower']:
                entry_score += 1
                reasons.append("BB lower band bounce")
            # Volume confirmation: 50% above the 20-row average
            if current['vol_ratio'] > 1.5:
                entry_score += 1
                reasons.append("High volume")
            # Strong overall conditions
            if (current['rsi'] > 40 and current['rsi'] < 70 and
                    current['macd'] > 0):
                entry_score += 1
                reasons.append("Favorable momentum")
            if entry_score >= 3:  # Minimum threshold for entry
                signals.append({
                    'date': current['date'],
                    'type': 'ENTRY',
                    'price': current['close'],
                    'score': entry_score,
                    'reasons': reasons
                })
        return signals

    def identify_exit_signals(self):
        """Score each row against bearish conditions; rows scoring >= 3 become
        EXIT signal dicts mirroring the ENTRY format.
        Assumes generate_all_indicators() has already been run."""
        signals = []
        for i in range(1, len(self.data)):
            exit_score = 0
            reasons = []
            current = self.data.iloc[i]
            previous = self.data.iloc[i - 1]
            # Moving Average bearish cross
            if (current['sma_20'] < current['sma_50'] and
                    previous['sma_20'] >= previous['sma_50']):
                exit_score += 2
                reasons.append("SMA Death Cross")
            # Price below key MA
            if current['close'] < current['sma_20']:
                exit_score += 1
                reasons.append("Price below SMA20")
            # RSI overbought
            if current['rsi'] > 70:
                exit_score += 1
                reasons.append("RSI overbought")
            # RSI falls back out of overbought territory (simplified divergence)
            if previous['rsi'] > 70 and current['rsi'] < 70:
                exit_score += 2
                reasons.append("RSI overbought exit")
            # MACD bearish crossover
            if (current['macd'] < current['macd_signal'] and
                    previous['macd'] >= previous['macd_signal']):
                exit_score += 2
                reasons.append("MACD bearish crossover")
            # Bollinger Band upper touch
            if current['close'] >= current['bb_upper']:
                exit_score += 1
                reasons.append("BB upper band resistance")
            # Volume spike (could indicate distribution)
            if current['vol_ratio'] > 3.0:
                exit_score += 1
                reasons.append("Extreme volume spike")
            if exit_score >= 3:  # Minimum threshold for exit
                signals.append({
                    'date': current['date'],
                    'type': 'EXIT',
                    'price': current['close'],
                    'score': exit_score,
                    'reasons': reasons
                })
        return signals

    def analyze_stock(self):
        """Complete workflow: compute indicators, then return
        (all signals sorted by date, the enriched DataFrame)."""
        self.generate_all_indicators()
        entry_signals = self.identify_entry_signals()
        exit_signals = self.identify_exit_signals()
        all_signals = entry_signals + exit_signals
        all_signals = sorted(all_signals, key=lambda x: x['date'])
        return all_signals, self.data
# Example usage and demo data generation
def generate_sample_data(days=252):
    """Generate `days` rows of synthetic OHLCV stock data for demos.

    Uses a fixed RNG seed so output is reproducible across runs.
    Returns a DataFrame with columns
    ['date', 'open', 'high', 'low', 'close', 'volume'].
    """
    np.random.seed(42)  # For reproducible results

    start_date = datetime.now() - timedelta(days=days)
    dates = [start_date + timedelta(days=i) for i in range(days)]

    # Random walk of close prices from ~N(0.1%, 2%) daily returns.
    returns = np.random.normal(0.001, 0.02, days)
    price = 100  # Starting price
    prices = [price]
    for ret in returns[1:]:
        price *= (1 + ret)
        prices.append(price)

    # Derive OHLC around each close; volume is floored at 100k so it
    # stays positive despite the normal draw.
    data = []
    for date, close in zip(dates, prices):
        high = close * (1 + abs(np.random.normal(0, 0.015)))
        low = close * (1 - abs(np.random.normal(0, 0.015)))
        open_price = low + (high - low) * np.random.random()
        volume = int(np.random.normal(1000000, 300000))
        data.append({
            'date': date,
            'open': open_price,
            'high': high,
            'low': low,
            'close': close,
            'volume': max(volume, 100000)
        })
    return pd.DataFrame(data)
# Demo execution: build sample data, run the full analysis, and print a
# human-readable summary of the latest indicators and recent signals.
if __name__ == "__main__":
    # Generate sample data
    print("Generating sample stock data...")
    sample_data = generate_sample_data(180)  # 6 months of data

    # Initialize analyzer
    analyzer = TechnicalAnalyzer(sample_data)

    # Run complete analysis
    print("Analyzing technical indicators...")
    signals, enhanced_data = analyzer.analyze_stock()

    # Display results
    print("\n=== TECHNICAL ANALYSIS RESULTS ===")
    print(f"Analysis period: {len(sample_data)} days")
    print(f"Total signals found: {len(signals)}")

    # Show latest indicator values (last row of the enriched DataFrame).
    print("\n=== LATEST INDICATOR VALUES ===")
    latest = enhanced_data.iloc[-1]
    print(f"Price: ${latest['close']:.2f}")
    print(f"RSI: {latest['rsi']:.2f}")
    print(f"MACD: {latest['macd']:.4f}")
    print(f"Volume Ratio: {latest['vol_ratio']:.2f}x")
    print(f"20-day SMA: ${latest['sma_20']:.2f}")
    print(f"50-day SMA: ${latest['sma_50']:.2f}")

    # Show signals from the last 30 days, capped at the most recent 5.
    print("\n=== RECENT SIGNALS ===")
    recent_signals = [s for s in signals if s['date']
                      >= (datetime.now() - timedelta(days=30))]
    if recent_signals:
        for signal in recent_signals[-5:]:  # Last 5 signals
            print(f"\n{signal['type']} Signal:")
            print(f"  Date: {signal['date'].strftime('%Y-%m-%d')}")
            print(f"  Price: ${signal['price']:.2f}")
            print(f"  Score: {signal['score']}")
            print(f"  Reasons: {', '.join(signal['reasons'])}")
    else:
        print("No recent signals found.")

    print("\n=== USAGE NOTES ===")
    print("1. Replace sample data with real market data from your preferred source")
    print("2. Adjust indicator parameters based on your trading style")
    print("3. Modify signal thresholds based on backtesting results")
    print("4. Always combine with risk management and position sizing")
    print("5. Consider market conditions and fundamental analysis")

9
n8n/.env.example Normal file
View File

@@ -0,0 +1,9 @@
POSTGRES_DB=nodemotion
POSTGRES_USER=nodemotion
# openssl rand -base64 18
POSTGRES_PASSWORD=
POSTGRES_NON_ROOT_USER=
# openssl rand -base64 18
POSTGRES_NON_ROOT_PASSWORD=

46
n8n/compose.yml Normal file
View File

@@ -0,0 +1,46 @@
volumes:
  db_data:
  n8n_data:

services:
  postgres:
    image: postgres:16
    restart: always
    environment:
      - POSTGRES_USER
      - POSTGRES_PASSWORD
      - POSTGRES_DB
      - POSTGRES_NON_ROOT_USER
      - POSTGRES_NON_ROOT_PASSWORD
    volumes:
      - db_data:/var/lib/postgresql/data
      - ./init-data.sh:/docker-entrypoint-initdb.d/init-data.sh
    healthcheck:
      test:
        - CMD-SHELL
        - pg_isready -h localhost -U ${POSTGRES_USER} -d ${POSTGRES_DB}
      interval: 5s
      timeout: 5s
      retries: 10

  n8n:
    image: docker.n8n.io/n8nio/n8n
    restart: always
    environment:
      - DB_TYPE=postgresdb
      - DB_POSTGRESDB_HOST=postgres
      - DB_POSTGRESDB_PORT=5432
      - DB_POSTGRESDB_DATABASE=${POSTGRES_DB}
      - DB_POSTGRESDB_USER=${POSTGRES_NON_ROOT_USER}
      - DB_POSTGRESDB_PASSWORD=${POSTGRES_NON_ROOT_PASSWORD}
      - N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY}
      - N8N_RUNNERS_ENABLED=true
    ports:
      # Quoted so the host:container mapping always parses as a string.
      - "5678:5678"
    # The deprecated `links:` entry was removed: services on the default
    # compose network already resolve each other by name, and startup
    # ordering is handled by the depends_on healthcheck below.
    volumes:
      - n8n_data:/home/node/.n8n
    depends_on:
      postgres:
        condition: service_healthy

13
n8n/init-data.sh Executable file
View File

@@ -0,0 +1,13 @@
#!/bin/bash
set -e;
if [ -n "${POSTGRES_NON_ROOT_USER:-}" ] && [ -n "${POSTGRES_NON_ROOT_PASSWORD:-}" ]; then
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
CREATE USER ${POSTGRES_NON_ROOT_USER} WITH PASSWORD '${POSTGRES_NON_ROOT_PASSWORD}';
GRANT ALL PRIVILEGES ON DATABASE ${POSTGRES_DB} TO ${POSTGRES_NON_ROOT_USER};
GRANT CREATE ON SCHEMA public TO ${POSTGRES_NON_ROOT_USER};
EOSQL
else
echo "SETUP INFO: No Environment variables given!"
fi

13
newt/compose.yml Normal file
View File

@@ -0,0 +1,13 @@
services:
newt:
image: fosrl/newt
container_name: newt
restart: unless-stopped
environment:
- PANGOLIN_ENDPOINT=https://pangolin.acedanger.com
- NEWT_ID=${NEWT_ID}
- NEWT_SECRET=${NEWT_SECRET}
- ACCEPT_CLIENTS=true
labels:
- diun.enable=true
networks: {}

19
ntfy/docker-compose.yml Normal file
View File

@@ -0,0 +1,19 @@
services:
  ntfy:
    image: binwiederhier/ntfy:latest
    container_name: ntfy
    command:
      - serve
    environment:
      - TZ=America/New_York
    # Quoted so "1000:1000" is parsed as a string, never a YAML 1.1 number.
    user: "1000:1000"
    volumes:
      - ./var/cache/ntfy:/var/cache/ntfy
      # NOTE(review): ./server.yml is also mounted over a path inside the
      # ./etc/ntfy mount below — confirm the overlap is intended.
      - ./etc/ntfy:/etc/ntfy
      - ./server.yml:/etc/ntfy/server.yml
    ports:
      - "4080:80"
      - "4443:443"
    restart: unless-stopped
    labels:
      - diun.enable=true

5
ntfy/server.yml Normal file
View File

@@ -0,0 +1,5 @@
base-url: "https://notify.peterwood.rocks"
upstream-base-url: "https://ntfy.sh"
# attachment-cache-dir: "/var/cache/ntfy/attachments"
# auth-file: "./var/lib/ntfy/user.db"
auth-default-access: "read-write"

7
ntfy/update-config.sh Executable file
View File

@@ -0,0 +1,7 @@
#!/bin/bash
# Push the updated ntfy server.yml into the container, then recreate the
# stack on the latest image. Stops on the first failed command.
set -euo pipefail

cd /home/acedanger/docker/ntfy
docker cp /home/acedanger/docker/ntfy/server.yml ntfy:/etc/ntfy/server.yml
docker compose down
docker compose pull
docker compose up -d

9
omni-tools/compose.yaml Normal file
View File

@@ -0,0 +1,9 @@
services:
omni-tools:
image: iib0011/omni-tools:latest
container_name: omni-tools
restart: unless-stopped
ports:
- 9980:80
labels:
- diun.enable=true

30
opengist/compose.yaml Normal file
View File

@@ -0,0 +1,30 @@
services:
opengist:
container_name: opengist
image: ghcr.io/thomiceli/opengist:latest
depends_on:
- postgres
restart: unless-stopped
environment:
OG_GITHUB_CLIENT_KEY: ${OG_GITHUB_CLIENT_KEY}
OG_GITHUB_SECRET: ${OG_GITHUB_SECRET}
OG_EXTERNAL_URL: https://gist.ptrwd.com
OG_DB_URI: "postgres://${PG_USER}:${PG_PASSWORD}@postgres:5432/${PG_DATABASE}"
ports:
- 6157:6157
- 2322:2222
volumes:
- $HOME/.opengist:/opengist
labels:
- diun.enable=true
postgres:
image: postgres:16.4
restart: unless-stopped
volumes:
- ./opengist-database:/var/lib/postgresql/data
environment:
POSTGRES_USER: ${PG_USER}
POSTGRES_PASSWORD: ${PG_PASSWORD}
POSTGRES_DB: ${PG_DATABASE}
labels:
- diun.enable=true

View File

@@ -0,0 +1,32 @@
#!/bin/bash
# Alternative script for importing GitHub Gists into OpenGist
# This version handles authentication differently to avoid URL parsing issues
github_user=acedanger
opengist_user=acedanger
opengist_password="Q\$R#rGV0tMGeIc1#"
opengist_host="gist.ptrwd.com"
curl -s https://api.github.com/users/"$github_user"/gists?per_page=100 | jq '.[] | .git_pull_url' -r | while read url; do
git clone "$url"
repo_dir=$(basename "$url" .git)
# Add remote, push, and remove the directory
if [ -d "$repo_dir" ]; then
cd "$repo_dir"
# Set up Git credentials for this repository
git config credential.helper store
echo "https://$opengist_user:$opengist_password@$opengist_host" > .git-credentials
git config credential.helper "store --file=.git-credentials"
# Add remote and push
git remote add gist "https://$opengist_host/init"
git push -u gist --all
# Clean up
rm -f .git-credentials
cd ..
rm -rf "$repo_dir"
fi
done

View File

@@ -0,0 +1,37 @@
#!/bin/bash
# GitHub Gist import script using GitHub token for authentication
# This script works by first logging in via GitHub OAuth and then importing
github_user=acedanger
opengist_url="https://gist.ptrwd.com"
echo "This script requires you to first log in to OpenGist via GitHub OAuth."
echo "Please visit: $opengist_url"
echo "Log in with your GitHub account, then return here and press Enter to continue..."
read -p "Press Enter when you've logged in via GitHub OAuth: "
echo "Starting GitHub Gist import..."
curl -s https://api.github.com/users/"$github_user"/gists?per_page=100 | jq '.[] | .git_pull_url' -r | while read url; do
echo "Processing gist: $url"
git clone "$url"
repo_dir=$(basename "$url" .git)
if [ -d "$repo_dir" ]; then
cd "$repo_dir"
# Use the web-based authentication by prompting the user
echo "For repository $repo_dir, you'll need to authenticate via browser..."
echo "When prompted, use your GitHub credentials that you used to log in to OpenGist"
# Add remote and push
git remote add gist "$opengist_url/init"
git push -u gist --all
cd ..
rm -rf "$repo_dir"
echo "Completed import of $repo_dir"
fi
done
echo "Import process completed!"

22
opengist/import-github-gist.sh Executable file
View File

@@ -0,0 +1,22 @@
#!/bin/bash
# This script imports all GitHub Gists into OpenGist.
#
# SECURITY NOTE(review): the OpenGist password is hard-coded in plain text
# below. Move it to an environment variable or a credential helper before
# committing this file anywhere shared.
github_user=acedanger
opengist_user=acedanger
opengist_password="Q\$R%23rGV0tMGeIc1%23" # URL-encoded password (# becomes %23)
opengist_url="http://$opengist_user:$opengist_password@localhost:6157/init"

# The API URL is quoted so the '?' is never glob-expanded by the shell,
# and read -r keeps backslashes in URLs intact.
curl -s "https://api.github.com/users/$github_user/gists?per_page=100" | jq -r '.[] | .git_pull_url' | while read -r url; do
	git clone "$url"
	repo_dir=$(basename "$url" .git)
	# Push each cloned gist to OpenGist, then remove the working copy.
	if [ -d "$repo_dir" ]; then
		(
			# Subshell keeps the cd local, so a failed cd can't make the
			# later rm -rf run in the wrong directory.
			cd "$repo_dir" || exit 1
			git remote add gist "$opengist_url"
			git push -u gist --all
		)
		rm -rf "$repo_dir"
	fi
done

372
pangolin/add_domain.sh Executable file
View File

@@ -0,0 +1,372 @@
#!/bin/bash
# Script to add domains to Pangolin's config.yml file with validation and automatic restart
# Usage: ./add_domain.sh domain_name cert_resolver
# Set constants
readonly CONFIG_FILE="./config/config.yml"
readonly BACKUP_FILE="./config/config.yml.bak"
readonly DEFAULT_CERT_RESOLVER="letsencrypt"
# Colors for terminal output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[0;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color
# Logging functions
# Each helper prints a colour-coded, level-tagged message to stdout.
# $1 - the message text. `echo -e` also expands escape sequences that may
# appear in the message itself.
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Function to check if a command exists
# Returns 0 when $1 resolves to a command (binary, builtin, or function).
command_exists() {
command -v "$1" &> /dev/null
}
# Function to validate domain name format
# Accepts one or more dot-separated labels (1-63 chars, alphanumeric with
# internal hyphens) followed by an alphabetic TLD of at least 2 characters.
# Prints an error and returns 1 on rejection.
validate_domain_format() {
local domain="$1"
# The previous pattern required every label to be at least 3 characters and
# allowed at most three labels, which wrongly rejected valid names such as
# 'ab.com', 'x.org', or 'my.sub.example.com'.
if ! [[ "$domain" =~ ^([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}$ ]]; then
log_error "Invalid domain format. Please enter a valid domain like 'example.com'"
return 1
fi
return 0
}
# Function to check DNS resolution - optimized to try tools in order of preference
# $1 - domain to resolve. Tries dig, then nslookup, then host. If no tool is
# available the check is skipped (returns 0). If the domain does not resolve
# to an IPv4 address, the user is asked interactively whether to proceed;
# answering anything but y/Y returns 1.
# NOTE(review): only A/IPv4 records are validated even though the warning
# text mentions AAAA — confirm whether IPv6-only domains should pass.
check_dns_resolution() {
local domain="$1"
local ip=""
log_info "Checking if domain '$domain' is properly configured in DNS..."
# Use the best available DNS checking tool
if command_exists dig; then
ip=$(dig +short "$domain" A | head -1)
elif command_exists nslookup; then
# NOTE(review): grepping 'Address:' can pick up the resolver's own address
# line on some nslookup implementations — verify on the target platform.
ip=$(nslookup "$domain" | grep 'Address:' | tail -1 | awk '{print $2}')
elif command_exists host; then
log_warning "'dig' and 'nslookup' not found, using basic 'host' command which may be less reliable."
ip=$(host "$domain" | grep 'has address' | head -1 | awk '{print $4}')
else
log_warning "No DNS resolution tools found (dig, nslookup, or host). Skipping DNS check."
return 0
fi
# Accept only a dotted-quad IPv4 result; anything else triggers the warning.
if [[ -z "$ip" || ! "$ip" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
log_warning "Domain '$domain' does not resolve to an IP address."
log_warning "The domain should have an A or AAAA record pointing to your server IP address."
# Ask user if they want to proceed despite DNS warning
read -p "Do you want to proceed anyway? (y/n): " proceed
if [[ ! "$proceed" =~ ^[Yy]$ ]]; then
log_error "Operation canceled. Please configure DNS properly and try again."
return 1
fi
else
log_success "Domain '$domain' resolves to IP: $ip"
fi
return 0
}
# Function to validate a domain (format and DNS)
# Runs the syntax check first and only performs the (slower, possibly
# interactive) DNS lookup when the format is acceptable. The function's
# exit status is that of the first check that fails, or 0 when both pass.
validate_domain() {
local candidate="$1"
validate_domain_format "$candidate" && check_dns_resolution "$candidate"
}
# Function to check if the Pangolin stack is running
# Returns 0 when `docker compose ps` lists any line containing 'pangolin'.
# NOTE(review): matches the string anywhere in the ps output, so similarly
# named containers would also satisfy the check.
is_stack_running() {
docker compose ps | grep -q 'pangolin'
}
# Function to wait for stack to be ready - extracted to avoid code duplication
# Polls `docker compose ps` until the pangolin container is listed and no
# container still reports 'starting', or until the poll budget is exhausted.
wait_for_stack() {
local timeout=30   # number of 2-second polls (~60s total)
local counter=0
log_info "Waiting for stack to be ready..."
while ((counter < timeout)); do
# The previous check used `grep -q -v 'starting'`, which succeeds as soon
# as ANY output line (e.g. the ps header) lacks 'starting' and therefore
# never actually waited. Require that NO line reports 'starting' instead.
if docker compose ps | grep -q 'pangolin' && ! docker compose ps | grep -q 'starting'; then
log_success "Pangolin stack is ready!"
return 0
fi
echo -n "."
sleep 2
((counter+=1))
done
log_error "Timeout waiting for stack to be ready. Please check your logs."
return 1
}
# Function to restart the Pangolin stack
# Tears the stack down first when it is already up, then starts it and
# waits for the containers to become ready.
restart_stack() {
log_info "Restarting Pangolin stack..."
if is_stack_running; then
docker compose down
sleep 2
else
log_info "Pangolin stack wasn't running. Starting it now..."
fi
docker compose up -d
wait_for_stack
}
# Function to check if the domain already exists in the config
# $1 - domain to look for. Returns 0 when a `base_domain: "<domain>"` line
# is present anywhere in CONFIG_FILE.
domain_exists() {
local domain="$1"
grep -q "base_domain: \"$domain\"" "$CONFIG_FILE"
}
# Function to get the next domain number
# Scans CONFIG_FILE for indented 'domainN:' keys and prints the highest N
# found plus one (prints 1 when none exist).
get_next_domain_number() {
local highest_num=0
# Find the highest domain number
while read -r line; do
if [[ "$line" =~ domain([0-9]+): ]]; then
num="${BASH_REMATCH[1]}"
if ((num > highest_num)); then
highest_num=$num
fi
fi
done < <(grep "^ domain[0-9]\+:" "$CONFIG_FILE")
echo $((highest_num + 1))
}
# Function to fix misplaced domains
# Detects 'domainN:' entries that live outside the top-level 'domains:'
# section of CONFIG_FILE and, with interactive confirmation, extracts each
# (domainN / base_domain / cert_resolver) triple and re-inserts it under a
# (possibly newly created) 'domains:' section.
# NOTE(review): relies on GNU sed's -i flag; the in-place edits are not
# atomic, so the caller's backup (BACKUP_FILE) is the recovery path.
fix_misplaced_domains() {
log_info "Checking for misplaced domain entries..."
# Check for misplaced domain entries outside the domains section
local misplaced=$(grep -n "domain[0-9]\+:" "$CONFIG_FILE" | grep -v "^[0-9]\+:domains:" | grep -v "^[0-9]\+: domain[0-9]\+:")
if [ -n "$misplaced" ]; then
log_warning "Found misplaced domain entries outside the domains section:"
echo "$misplaced"
# Ask user if they want to fix the misplaced domains
read -p "Do you want to fix these misplaced domains? (y/n): " fix_domains
if [[ "$fix_domains" =~ ^[Yy]$ ]]; then
log_info "Creating a fixed config file..."
# Extract all domain entries from the misplaced location
# (awk state machine: a domainN: line opens a record; base_domain and
# cert_resolver lines are echoed; cert_resolver closes the record)
local extracted_domains=$(awk '
/^[[:space:]]+domain[0-9]+:/ && !/^[[:space:]]+domain[0-9]+:.*domains:/ {
in_domain = 1
domain_name = $0
print domain_name
next
}
in_domain == 1 && /^[[:space:]]+base_domain:/ {
base_domain = $0
print base_domain
next
}
in_domain == 1 && /^[[:space:]]+cert_resolver:/ {
cert_resolver = $0
print cert_resolver
in_domain = 0
next
}' "$CONFIG_FILE")
if [ -n "$extracted_domains" ]; then
log_info "Extracted domains:"
echo "$extracted_domains"
# Remove misplaced domains from the config
sed -i '/^[[:space:]]\+domain[0-9]\+:/,/^[[:space:]]\+cert_resolver:.*$/d' "$CONFIG_FILE"
# Check if domains section exists
if grep -q "^domains:" "$CONFIG_FILE"; then
log_info "Adding extracted domains to the domains section..."
# Find the end of the domains section
local domains_end=$(awk '/^domains:/{in_domains=1} in_domains==1 && /^[a-zA-Z][^:]*:/ && !/^domains:/{print NR-1; exit}' "$CONFIG_FILE")
if [ -z "$domains_end" ]; then
domains_end=$(wc -l < "$CONFIG_FILE")
fi
# Insert the extracted domains at the end of the domains section
sed -i "${domains_end}a\\$(echo "$extracted_domains" | sed 's/^/ /')" "$CONFIG_FILE"
else
log_info "Creating domains section with extracted domains..."
# Find a good spot to insert the domains section (after app section)
local app_end=$(awk '/^app:/{in_app=1} in_app==1 && /^[a-zA-Z][^:]*:/ && !/^app:/{print NR-1; exit}' "$CONFIG_FILE")
if [ -z "$app_end" ]; then
app_end=1
fi
# Insert the domains section with the extracted domains
sed -i "${app_end}a\\domains:\\$(echo "$extracted_domains" | sed 's/^/ /')" "$CONFIG_FILE"
fi
log_success "Fixed misplaced domains."
fi
fi
else
log_info "No misplaced domains found."
fi
}
# Function to add domain to config
# $1 - domain name, $2 - certificate resolver. Normalizes any misplaced
# entries first, then inserts a new two-space-indented
# domainN/base_domain/cert_resolver block under 'domains:' (creating the
# section after 'app:' or at a fallback location when absent). Verifies the
# insertion with grep and returns 0 on success, 1 on failure.
# NOTE(review): uses GNU sed -i; the verification greps match the key names
# anywhere in the file, not specifically the newly inserted block.
add_domain_to_config() {
local domain="$1"
local cert_resolver="$2"
local next_domain_num
# Fix any misplaced domains first
fix_misplaced_domains
# Check if domains section already exists
if grep -q "^domains:" "$CONFIG_FILE"; then
log_info "Domains section exists. Finding the last domain entry..."
# Find the next domain number
next_domain_num=$(get_next_domain_number)
log_info "Using domain$next_domain_num for new entry"
# Find the end of the domains section
# (first top-level key after 'domains:' marks the boundary)
local domains_end=$(awk '/^domains:/{in_domains=1} in_domains==1 && /^[a-zA-Z][^:]*:/ && !/^domains:/{print NR-1; exit}' "$CONFIG_FILE")
if [ -z "$domains_end" ]; then
log_info "No next section found after domains, adding to end of file"
domains_end=$(wc -l < "$CONFIG_FILE")
fi
# Use sed to insert the new domain entry at the correct position
sed -i "${domains_end}i\\ domain${next_domain_num}:\\n base_domain: \"${domain}\"\\n cert_resolver: \"${cert_resolver}\"" "$CONFIG_FILE"
else
# Domains section does not exist, need to add it
log_info "Domains section does not exist. Creating it..."
# Find the line where the app section ends
local app_end=$(awk '/^app:/{app=1} app==1 && /^[a-zA-Z][^:]*:/{if($0 !~ /^app:/) {print NR-1; exit}}' "$CONFIG_FILE")
if [ -z "$app_end" ]; then
log_info "Could not find end of app section, adding domains after first blank line"
# Find first blank line
local blank_line=$(grep -n "^$" "$CONFIG_FILE" | head -1 | cut -d: -f1)
if [ -z "$blank_line" ]; then
log_info "No blank line found, adding domains at end of file"
app_end=$(wc -l < "$CONFIG_FILE")
else
log_info "Found blank line at $blank_line, adding domains after it"
app_end=$blank_line
fi
fi
# Use sed to insert the domains section
sed -i "${app_end}a\\\\ndomains:\\n domain1:\\n base_domain: \"${domain}\"\\n cert_resolver: \"${cert_resolver}\"" "$CONFIG_FILE"
next_domain_num=1
fi
# Verify the change was made
log_info "Checking if domain was added:"
grep -A2 -n "domain${next_domain_num}:" "$CONFIG_FILE"
if grep -q "domain${next_domain_num}:" "$CONFIG_FILE" && \
grep -q "base_domain: \"${domain}\"" "$CONFIG_FILE" && \
grep -q "cert_resolver: \"${cert_resolver}\"" "$CONFIG_FILE"; then
log_success "Added domain$next_domain_num: $domain with cert_resolver: $cert_resolver"
return 0
else
log_error "Failed to add domain $domain. Please check the config file manually."
return 1
fi
}
# Main execution starts here
# Usage: ./add_domain.sh domain_name [cert_resolver]
# Validates the domain, backs up the config, inserts the new entry, and
# optionally restarts the Pangolin stack.
# Check if arguments are provided
if [ $# -lt 1 ]; then
log_error "Missing required arguments."
echo -e "${BLUE}Usage: $0 domain_name [cert_resolver]${NC}"
echo -e "${BLUE}Example: $0 example.com letsencrypt${NC}"
exit 1
fi
# Set domain name from first argument
DOMAIN_NAME=$1
# Set cert resolver from second argument or default
CERT_RESOLVER=${2:-$DEFAULT_CERT_RESOLVER}
# Validate domain (format check, then interactive DNS check)
if ! validate_domain "$DOMAIN_NAME"; then
exit 1
fi
# Check if config file exists
if [ ! -f "$CONFIG_FILE" ]; then
log_error "Config file not found at $CONFIG_FILE"
exit 1
fi
# Create backup of config file (used to roll back on failure below)
cp "$CONFIG_FILE" "$BACKUP_FILE"
log_success "Created backup at $BACKUP_FILE"
# Check if the domain already exists in the config
if domain_exists "$DOMAIN_NAME"; then
log_error "Domain '$DOMAIN_NAME' already exists in the config."
exit 1
fi
# Add domain to config
if ! add_domain_to_config "$DOMAIN_NAME" "$CERT_RESOLVER"; then
log_error "Failed to add domain to config. Reverting changes..."
cp "$BACKUP_FILE" "$CONFIG_FILE"
exit 1
fi
# Ask for confirmation before restarting the stack
log_info "Configuration has been updated."
read -p "Do you want to restart the Pangolin stack now? (y/n): " restart_confirm
if [[ "$restart_confirm" =~ ^[Yy]$ ]]; then
restart_stack
else
log_warning "Stack not restarted. Remember to restart manually for changes to take effect:"
log_info "docker compose down && docker compose up -d"
fi
log_success "Domain $DOMAIN_NAME has been successfully added to the configuration."

View File

@@ -0,0 +1,76 @@
# Pangolin configuration. Indentation restored to valid 2-space YAML
# (the nesting follows Pangolin's documented config schema).
app:
  dashboard_url: https://pangolin.acedanger.com
  log_level: info
  save_logs: false

domains:
  domain1:
    base_domain: acedanger.com
    cert_resolver: letsencrypt
  domain2:
    base_domain: peterwood.rocks
    cert_resolver: letsencrypt
  domain3:
    base_domain: peterwood.dad
    cert_resolver: letsencrypt
  domain4:
    base_domain: ptrwd.com
    cert_resolver: letsencrypt
  domain5:
    base_domain: margotwood.xyz
    cert_resolver: letsencrypt

server:
  external_port: 3000
  internal_port: 3001
  next_port: 3002
  internal_hostname: pangolin
  session_cookie_name: p_session_token
  resource_access_token_param: p_token
  resource_access_token_headers:
    id: P-Access-Token-Id
    token: P-Access-Token
  resource_session_request_param: p_session_request
  # SECURITY: signing secret committed to the repo — rotate it and load it
  # from an environment variable or secrets store instead.
  secret: EkiOH3KRHNzde3euT1yTaYIKXchPmHqz
  cors:
    origins:
      - https://pangolin.acedanger.com
    methods:
      - GET
      - POST
      - PUT
      - DELETE
      - PATCH
    headers:
      - X-CSRF-Token
      - Content-Type
    credentials: false

traefik:
  cert_resolver: letsencrypt
  http_entrypoint: web
  https_entrypoint: websecure

gerbil:
  start_port: 51820
  base_endpoint: pangolin.acedanger.com
  use_subdomain: false
  block_size: 24
  site_block_size: 30
  subnet_group: 100.89.137.0/20

rate_limits:
  global:
    window_minutes: 1
    max_requests: 500

email:
  smtp_host: smtp.fastmail.com
  smtp_port: 465
  smtp_user: peter@peterwood.dev
  # SECURITY: SMTP app password committed to the repo — rotate it.
  smtp_pass: 7v5x943m4g58384q
  no_reply: no-reply@peterwood.dev

users:
  server_admin:
    email: peter@peterwood.dev
    # SECURITY: admin password committed to the repo — rotate it.
    # Quoted so YAML special characters can never change the parse.
    password: "23!hA1F^RCjT28"

flags:
  require_email_verification: true
  disable_signup_without_invite: true
  disable_user_create_org: false
  allow_raw_resources: true
  allow_base_domain_resources: true

View File

@@ -0,0 +1 @@
Database and Contents Copyright (c) 2025 MaxMind, Inc.

View File

@@ -0,0 +1,3 @@
Use of this MaxMind product is governed by MaxMind's GeoLite2 End User License Agreement, which can be viewed at https://www.maxmind.com/en/geolite2/eula.
This database incorporates GeoNames [https://www.geonames.org] geographical data, which is made available under the Creative Commons Attribution 4.0 License. To view a copy of this license, visit https://creativecommons.org/licenses/by/4.0/.

View File

@@ -0,0 +1 @@
Latitude and longitude are not precise and should not be used to identify a particular street address or household.

View File

@@ -0,0 +1,84 @@
# Traefik dynamic configuration for the Pangolin stack.
# Indentation restored to valid 2-space YAML per the Traefik file-provider
# schema (http.middlewares / http.routers / http.services / tcp).
http:
  middlewares:
    redirect-to-https:
      redirectScheme:
        scheme: https

  routers:
    # HTTP to HTTPS redirect router
    main-app-router-redirect:
      rule: "Host(`pangolin.acedanger.com`)"
      service: next-service
      entryPoints:
        - web
      middlewares:
        - redirect-to-https

    # Next.js router (handles everything except API and WebSocket paths)
    next-router:
      rule: "Host(`pangolin.acedanger.com`) && !PathPrefix(`/api/v1`)"
      service: next-service
      entryPoints:
        - websecure
      tls:
        certResolver: letsencrypt

    # API router (handles /api/v1 paths)
    api-router:
      rule: "Host(`pangolin.acedanger.com`) && PathPrefix(`/api/v1`)"
      service: api-service
      entryPoints:
        - websecure
      tls:
        certResolver: letsencrypt

    # WebSocket router
    ws-router:
      rule: "Host(`pangolin.acedanger.com`)"
      service: api-service
      entryPoints:
        - websecure
      tls:
        certResolver: letsencrypt

    # Traefik Log Dashboard router
    traefik-dashboard-redirect:
      rule: "Host(`traefik-logs.acedanger.com`)"
      service: traefik-dashboard-service
      entryPoints:
        - web
      middlewares:
        - redirect-to-https
    traefik-dashboard-router:
      rule: "Host(`traefik-logs.acedanger.com`)"
      service: traefik-dashboard-service
      entryPoints:
        - websecure
      tls:
        certResolver: letsencrypt

  services:
    next-service:
      loadBalancer:
        servers:
          - url: "http://pangolin:3002" # Next.js server
    api-service:
      loadBalancer:
        servers:
          - url: "http://pangolin:3000" # API/WebSocket server
    traefik-dashboard-service:
      loadBalancer:
        servers:
          - url: "http://traefik-dashboard:3000"

tcp:
  serversTransports:
    pp-transport-v1:
      proxyProtocol:
        version: 1
    pp-transport-v2:
      proxyProtocol:
        version: 2

View File

@@ -0,0 +1,62 @@
# Traefik static configuration for the Pangolin stack.
# Indentation restored to valid 2-space YAML per the Traefik static-config
# schema.
api:
  # NOTE(review): insecure: true serves the API/dashboard unauthenticated on
  # the default :8080 entrypoint — confirm that port is not exposed
  # externally (it is not published in docker-compose here).
  insecure: true
  dashboard: true

providers:
  http:
    endpoint: "http://pangolin:3001/api/v1/traefik-config"
    pollInterval: "5s"
  file:
    filename: "/etc/traefik/dynamic_config.yml"

experimental:
  plugins:
    badger:
      moduleName: "github.com/fosrl/badger"
      version: "v1.2.0"

log:
  level: "INFO"
  format: "common"

accessLog:
  filePath: "/var/log/traefik/access.log"
  format: "json"
  bufferingSize: 100
  fields:
    defaultMode: "keep"
    names:
      ClientUsername: "drop"
    headers:
      defaultMode: "keep"
      names:
        # Drop credential-bearing headers from the access log.
        Authorization: "drop"
        Cookie: "drop"

certificatesResolvers:
  letsencrypt:
    acme:
      httpChallenge:
        entryPoint: web
      email: "peter@peterwood.dev"
      storage: "/letsencrypt/acme.json"
      caServer: "https://acme-v02.api.letsencrypt.org/directory"

entryPoints:
  web:
    address: ":80"
  websecure:
    address: ":443"
    transport:
      respondingTimeouts:
        readTimeout: "30m"
    http:
      tls:
        certResolver: "letsencrypt"
  tcp-2229:
    address: ":2229/tcp"
  tcp-5432:
    address: ":5432/tcp"

serversTransport:
  insecureSkipVerify: true

112
pangolin/docker-compose.yml Normal file
View File

@@ -0,0 +1,112 @@
# Pangolin stack: pangolin + gerbil (WireGuard) + traefik sharing gerbil's
# network namespace, plus the Traefik log dashboard agent/UI.
# Indentation restored to valid 2-space YAML; port mappings quoted so the
# YAML parser can never misread a digits-and-colon value.
name: pangolin
services:
  pangolin:
    image: fosrl/pangolin:1.12.1
    container_name: pangolin
    restart: unless-stopped
    labels:
      - diun.enable=true
    volumes:
      - ./config:/app/config
    healthcheck:
      test:
        - CMD
        - curl
        - -f
        - http://localhost:3001/api/v1/
      interval: 10s
      timeout: 10s
      retries: 15

  gerbil:
    image: fosrl/gerbil:latest
    container_name: gerbil
    restart: unless-stopped
    labels:
      - diun.enable=true
    depends_on:
      pangolin:
        condition: service_healthy
    command:
      - --reachableAt=http://gerbil:3003
      - --generateAndSaveKeyTo=/var/config/key
      - --remoteConfig=http://pangolin:3001/api/v1/gerbil/get-config
      - --reportBandwidthTo=http://pangolin:3001/api/v1/gerbil/receive-bandwidth
    volumes:
      - ./config/:/var/config
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    ports:
      - "51820:51820/udp"
      - "21820:21820/udp" # port for ACCEPT_CLIENTS env variable
      - "443:443" # Port for traefik because of the network_mode
      - "80:80" # Port for traefik because of the network_mode
      - "2229:2229" # port for gitea, served from europa; git.ptrwd.com
      - "5432:5432" # port for postgres, served from io

  traefik:
    image: traefik:v3
    container_name: traefik
    restart: unless-stopped
    labels:
      - diun.enable=true
    network_mode: service:gerbil # Ports appear on the gerbil service
    depends_on:
      pangolin:
        condition: service_healthy
    command:
      - --configFile=/etc/traefik/traefik_config.yml
    volumes:
      - ./config/traefik:/etc/traefik:ro # Volume to store the Traefik configuration
      - ./config/letsencrypt:/letsencrypt # Volume to store the Let's Encrypt certificates
      - ./config/traefik/logs:/var/log/traefik # Volume to store Traefik logs

  traefik-agent:
    image: hhftechnology/traefik-log-dashboard-agent:dev-dashboard
    container_name: traefik-log-dashboard-agent
    restart: unless-stopped
    labels:
      - diun.enable=true
    ports:
      - "5000:5000"
    volumes:
      - ./config/traefik/logs:/logs:ro
      - ./config/traefik-dashboard/geoip:/geoip:ro
      - ./config/traefik-dashboard/positions:/data
    environment:
      - TRAEFIK_LOG_DASHBOARD_ACCESS_PATH=/logs/access.log
      - TRAEFIK_LOG_DASHBOARD_ERROR_PATH=/logs/access.log
      - TRAEFIK_LOG_DASHBOARD_AUTH_TOKEN=${TRAEFIK_DASHBOARD_AUTH_TOKEN}
      - TRAEFIK_LOG_DASHBOARD_SYSTEM_MONITORING=true
      - TRAEFIK_LOG_DASHBOARD_GEOIP_ENABLED=true
      - TRAEFIK_LOG_DASHBOARD_GEOIP_CITY_DB=/geoip/GeoLite2-City.mmdb
      - TRAEFIK_LOG_DASHBOARD_GEOIP_COUNTRY_DB=/geoip/GeoLite2-Country.mmdb
      - TRAEFIK_LOG_DASHBOARD_LOG_FORMAT=json
      - PORT=5000
    healthcheck:
      test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5000/api/logs/status" ]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

  traefik-dashboard:
    image: hhftechnology/traefik-log-dashboard:dev-dashboard
    container_name: traefik-log-dashboard
    restart: unless-stopped
    labels:
      - diun.enable=true
    ports:
      - "3005:3000"
    volumes:
      - ./config/traefik-dashboard/dashboard:/app/data
    environment:
      - AGENT_API_URL=http://traefik-agent:5000
      - AGENT_API_TOKEN=${TRAEFIK_DASHBOARD_AUTH_TOKEN}
      - AGENT_NAME=Pangolin Traefik Agent
      - NODE_ENV=production
      - PORT=3000
    depends_on:
      traefik-agent:
        condition: service_healthy

networks:
  default:
    driver: bridge
    name: pangolin

View File

@@ -0,0 +1,5 @@
# PostgreSQL Configuration
POSTGRES_DB=paperless
POSTGRES_USER=paperless
# NOTE(review): intentionally blank here — set a real password before use.
POSTGRES_PASSWORD=
COMPOSE_PROJECT_NAME=paperless

View File

@@ -0,0 +1,71 @@
# docker-compose file for running paperless from the Docker Hub.
# This file contains everything paperless needs to run.
# Paperless supports amd64, arm and arm64 hardware.
#
# All compose files of paperless configure paperless in the following way:
#
# - Paperless is (re)started on system boot, if it was running before shutdown.
# - Docker volumes for storing data are managed by Docker.
# - Folders for importing and exporting files are created in the same directory
#   as this file and mounted to the correct folders inside the container.
# - Paperless listens on port 8000 (published on 8010 here).
#
# In addition to that, this docker-compose file adds the following optional
# configurations:
#
# - Instead of SQLite (default), PostgreSQL is used as the database server.
#
# To install and update paperless with this file, do the following:
#
# - Copy this file as 'docker-compose.yml' and the files 'docker-compose.env'
#   and '.env' into a folder.
# - Run 'docker-compose pull'.
# - Run 'docker-compose run --rm webserver createsuperuser' to create a user.
# - Run 'docker-compose up -d'.
#
# For more extensive installation and update instructions, refer to the
# documentation.
version: "3.4" # NOTE(review): ignored by Compose v2; kept for older tooling
services:
  broker:
    image: redis:6.0
    restart: unless-stopped

  db:
    image: postgres:13
    restart: unless-stopped
    volumes:
      - pgdata:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: paperless
      POSTGRES_USER: paperless
      # Uses POSTGRES_PASSWORD from .env when provided; falls back to the
      # previous hard-coded default so existing databases keep working.
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-paperless}

  webserver:
    image: jonaswinkler/paperless-ng:latest
    restart: unless-stopped
    depends_on:
      - db
      - broker
    ports:
      - "8010:8000"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000"]
      interval: 30s
      timeout: 10s
      retries: 5
    volumes:
      - data:/usr/src/paperless/data
      - media:/usr/src/paperless/media
      - ./export:/usr/src/paperless/export
      - ./consume:/usr/src/paperless/consume
    environment:
      PAPERLESS_REDIS: redis://broker:6379
      PAPERLESS_DBHOST: db
      # Keep the app's DB password in sync with the db service above.
      PAPERLESS_DBPASS: ${POSTGRES_PASSWORD:-paperless}
      COMPOSE_PROJECT_NAME: paperless

volumes:
  data:
  media:
  pgdata:

23
papra/docker-compose.yml Normal file
View File

@@ -0,0 +1,23 @@
# Papra document-management service. Indentation restored to valid
# 2-space YAML; ambiguous scalars quoted.
services:
  papra:
    image: ghcr.io/papra-hq/papra:latest
    container_name: papra
    restart: unless-stopped
    ports:
      - "1221:1221"
    environment:
      - AUTH_SECRET=${AUTH_SECRET}
      - CLIENT_BASE_URL=https://docs.peterwood.rocks
      - SERVER_BASE_URL=https://docs.peterwood.rocks
      - INGESTION_FOLDER_IS_ENABLED=true
      - INTAKE_EMAILS_IS_ENABLED=true
      - INTAKE_EMAILS_DRIVER=owlrelay
      - INTAKE_EMAILS_WEBHOOK_SECRET=${INTAKE_EMAILS_WEBHOOK_SECRET}
      - OWLRELAY_API_KEY=${OWLRELAY_API_KEY}
      # NOTE(review): host is 'doc.peterwood.rocks' while the base URLs use
      # 'docs.peterwood.rocks' — confirm this is not a typo.
      - OWLRELAY_WEBHOOK_URL=https://doc.peterwood.rocks/api/intake-emails/ingest
    volumes:
      - ./app-data:/app/app-data
      - ./ingestion:/app/ingestion
    # Quoted so the uid:gid pair is always read as a string.
    user: "1000:1000"
    labels:
      - diun.enable=true

3
pdf/.env.example Normal file
View File

@@ -0,0 +1,3 @@
# Security settings
SECURITY_INITIALLOGIN_USERNAME=admin
SECURITY_INITIALLOGIN_PASSWORD=changeme!1

28
pdf/docker-compose.yml Normal file
View File

@@ -0,0 +1,28 @@
# Stirling-PDF service. Indentation restored to valid 2-space YAML.
services:
  stirling-pdf:
    container_name: stirlingpdf
    image: stirlingtools/stirling-pdf:latest
    deploy:
      resources:
        limits:
          memory: 4G
    ports:
      - "8089:8080"
    volumes:
      - ./stirling/latest/data:/usr/share/tessdata:rw
      - ./stirling/latest/config:/configs:rw
      - ./stirling/latest/logs:/logs:rw
    environment:
      # The Compose spec requires environment map values to be strings;
      # bare true/false are YAML booleans and are rejected by the schema
      # validator, so they are quoted here.
      SECURITY_ENABLELOGIN: "true"
      DOCKER_ENABLE_SECURITY: "true"
      # SECURITY_INITIALLOGIN_USERNAME: ${SECURITY_INITIALLOGIN_USERNAME}
      # SECURITY_INITIALLOGIN_PASSWORD: ${SECURITY_INITIALLOGIN_PASSWORD}
      LANGS: "en-US"
      SYSTEM_DEFAULTLOCALE: "en-US"
      UI_APPNAME: "Stirling PDF"
      UI_HOMEDESCRIPTION: ""
      UI_APPNAMENAVBAR: ""
      SYSTEM_MAXFILESIZE: "100"
    restart: unless-stopped
    labels:
      - diun.enable=true

View File

@@ -0,0 +1,134 @@
#############################################################################################################
# Welcome to settings file from #
# ____ _____ ___ ____ _ ___ _ _ ____ ____ ____ _____ #
# / ___|_ _|_ _| _ \| | |_ _| \ | |/ ___| | _ \| _ \| ___| #
# \___ \ | | | || |_) | | | || \| | | _ _____| |_) | | | | |_ #
# ___) || | | || _ <| |___ | || |\ | |_| |_____| __/| |_| | _| #
# |____/ |_| |___|_| \_\_____|___|_| \_|\____| |_| |____/|_| #
# #
# Do not comment out any entry, it will be removed on next startup #
# If you want to override with environment parameter follow parameter naming SECURITY_INITIALLOGIN_USERNAME #
#############################################################################################################
# Indentation restored to valid 2-space YAML per the Stirling-PDF settings template.
security:
  enableLogin: 'true' # set to 'true' to enable login
  csrfDisabled: 'false' # set to 'true' to disable CSRF protection (not recommended for production)
  loginAttemptCount: 5 # lock user account after 5 tries; when using e.g. Fail2Ban you can deactivate the function with -1
  loginResetTimeMinutes: 120 # lock account for 2 hours after x attempts
  loginMethod: normal # Accepts values like 'all' and 'normal'(only Login with Username/Password), 'oauth2'(only Login with OAuth2) or 'saml2'(only Login with SAML2)
  initialLogin:
    username: admin # initial username for the first login
    # NOTE(review): still the stock default password — change it, or supply
    # SECURITY_INITIALLOGIN_PASSWORD via the environment.
    password: changeme!1 # initial password for the first login
  oauth2:
    enabled: false # set to 'true' to enable login (Note: enableLogin must also be 'true' for this to work)
    client:
      keycloak:
        issuer: '' # URL of the Keycloak realm's OpenID Connect Discovery endpoint
        clientId: '' # client ID for Keycloak OAuth2
        clientSecret: '' # client secret for Keycloak OAuth2
        scopes: openid, profile, email # scopes for Keycloak OAuth2
        useAsUsername: preferred_username # field to use as the username for Keycloak OAuth2
      google:
        clientId: '' # client ID for Google OAuth2
        clientSecret: '' # client secret for Google OAuth2
        scopes: https://www.googleapis.com/auth/userinfo.email, https://www.googleapis.com/auth/userinfo.profile # scopes for Google OAuth2
        useAsUsername: email # field to use as the username for Google OAuth2
      github:
        clientId: '' # client ID for GitHub OAuth2
        clientSecret: '' # client secret for GitHub OAuth2
        scopes: read:user # scope for GitHub OAuth2
        useAsUsername: login # field to use as the username for GitHub OAuth2
    issuer: '' # set to any provider that supports OpenID Connect Discovery (/.well-known/openid-configuration) endpoint
    clientId: '' # client ID from your provider
    clientSecret: '' # client secret from your provider
    autoCreateUser: true # set to 'true' to allow auto-creation of non-existing users
    blockRegistration: false # set to 'true' to deny login with SSO without prior registration by an admin
    useAsUsername: email # default is 'email'; custom fields can be used as the username
    scopes: openid, profile, email # specify the scopes for which the application will request permissions
    provider: google # set this to your OAuth provider's name, e.g., 'google' or 'keycloak'
  saml2:
    enabled: false # Only enabled for paid enterprise clients (enterpriseEdition.enabled must be true)
    autoCreateUser: true # set to 'true' to allow auto-creation of non-existing users
    blockRegistration: false # set to 'true' to deny login with SSO without prior registration by an admin
    registrationId: stirling
    idpMetadataUri: https://dev-XXXXXXXX.okta.com/app/externalKey/sso/saml/metadata
    idpSingleLogoutUrl: https://dev-XXXXXXXX.okta.com/app/dev-XXXXXXXX_stirlingpdf_1/externalKey/slo/saml
    idpSingleLoginUrl: https://dev-XXXXXXXX.okta.com/app/dev-XXXXXXXX_stirlingpdf_1/externalKey/sso/saml
    idpIssuer: http://www.okta.com/externalKey
    idpCert: classpath:okta.crt
    privateKey: classpath:saml-private-key.key
    spCert: classpath:saml-public-cert.crt

enterpriseEdition:
  enabled: false # set to 'true' to enable enterprise edition
  key: 00000000-0000-0000-0000-000000000000
  SSOAutoLogin: false # Enable to auto login to first provided SSO
  CustomMetadata:
    autoUpdateMetadata: false # set to 'true' to automatically update metadata with below values
    author: username # supports text such as 'John Doe' or types such as username to autopopulate with user's username
    creator: Stirling-PDF # supports text such as 'Company-PDF'
    producer: Stirling-PDF # supports text such as 'Company-PDF'

legal:
  termsAndConditions: https://www.stirlingpdf.com/terms-and-conditions # URL to the terms and conditions of your application (e.g. https://example.com/terms). Empty string to disable or filename to load from local file in static folder
  privacyPolicy: https://www.stirlingpdf.com/privacy-policy # URL to the privacy policy of your application (e.g. https://example.com/privacy). Empty string to disable or filename to load from local file in static folder
  accessibilityStatement: '' # URL to the accessibility statement of your application (e.g. https://example.com/accessibility). Empty string to disable or filename to load from local file in static folder
  cookiePolicy: '' # URL to the cookie policy of your application (e.g. https://example.com/cookie). Empty string to disable or filename to load from local file in static folder
  impressum: '' # URL to the impressum of your application (e.g. https://example.com/impressum). Empty string to disable or filename to load from local file in static folder

system:
  defaultLocale: en-US # set the default language (e.g. 'de-DE', 'fr-FR', etc)
  googlevisibility: false # 'true' to allow Google visibility (via robots.txt), 'false' to disallow
  enableAlphaFunctionality: false # set to enable functionality which might need more testing before it fully goes live (this feature might make no changes)
  showUpdate: false # see when a new update is available
  showUpdateOnlyAdmin: false # only admins can see when a new update is available, depending on showUpdate it must be set to 'true'
  customHTMLFiles: false # enable to have files placed in /customFiles/templates override the existing template HTML files
  tessdataDir: /usr/share/tessdata # path to the directory containing the Tessdata files. This setting is relevant for Windows systems. For Windows users, this path should be adjusted to point to the appropriate directory where the Tessdata files are stored.
  enableAnalytics: 'true' # set to 'true' to enable analytics, set to 'false' to disable analytics; for enterprise users, this is set to true
  datasource:
    enableCustomDatabase: false # Enterprise users ONLY, set this property to 'true' if you would like to use your own custom database configuration
    customDatabaseUrl: '' # eg jdbc:postgresql://localhost:5432/postgres, set the url for your own custom database connection. If provided, the type, hostName, port and name are not necessary and will not be used
    username: postgres # set the database username
    password: postgres # set the database password
    type: postgresql # the type of the database to set (e.g. 'h2', 'postgresql')
    hostName: localhost # the host name to use for the database url. Set to 'localhost' when running the app locally. Set to match the name of the container name of your database container when running the app on a server (Docker configuration)
    port: 5432 # set the port number of the database. Ensure this matches the port the database is listening to
    name: postgres # set the name of your database. Should match the name of the database you create

ui:
  appName: '' # application's visible name
  homeDescription: '' # short description or tagline shown on the homepage
  appNameNavbar: '' # name displayed on the navigation bar

endpoints:
  toRemove: [] # list endpoints to disable (e.g. ['img-to-pdf', 'remove-pages'])
  groupsToRemove: [] # list groups to disable (e.g. ['LibreOffice'])

metrics:
  enabled: 'false' # 'true' to enable Info APIs (`/api/*`) endpoints, 'false' to disable

# Automatically Generated Settings (Do Not Edit Directly)
AutomaticallyGenerated:
  key: 241af3aa-bb38-4e14-a593-939b64d1d7a3
  UUID: 78ddd78f-688c-4b07-a351-edb8580f1fbd
  appVersion: 0.39.0

processExecutor:
  sessionLimit: # Process executor instances limits
    libreOfficeSessionLimit: 1
    pdfToHtmlSessionLimit: 1
    qpdfSessionLimit: 4
    tesseractSessionLimit: 1
    pythonOpenCvSessionLimit: 8
    weasyPrintSessionLimit: 16
    installAppSessionLimit: 1
    calibreSessionLimit: 1
  timeoutMinutes: # Process executor timeout in minutes
    libreOfficetimeoutMinutes: 30
    pdfToHtmltimeoutMinutes: 20
    pythonOpenCvtimeoutMinutes: 30
    weasyPrinttimeoutMinutes: 30
    installApptimeoutMinutes: 60
    calibretimeoutMinutes: 30
    tesseractTimeoutMinutes: 30

View File

@@ -0,0 +1,159 @@
#############################################################################################################
# Welcome to settings file from #
# ____ _____ ___ ____ _ ___ _ _ ____ ____ ____ _____ #
# / ___|_ _|_ _| _ \| | |_ _| \ | |/ ___| | _ \| _ \| ___| #
# \___ \ | | | || |_) | | | || \| | | _ _____| |_) | | | | |_ #
# ___) || | | || _ <| |___ | || |\ | |_| |_____| __/| |_| | _| #
# |____/ |_| |___|_| \_\_____|___|_| \_|\____| |_| |____/|_| #
# #
# Do not comment out any entry, it will be removed on next startup #
# If you want to override with environment parameter follow parameter naming SECURITY_INITIALLOGIN_USERNAME #
#############################################################################################################
security:
  enableLogin: true # set to 'true' to enable login
  csrfDisabled: false # set to 'true' to disable CSRF protection (not recommended for production)
  loginAttemptCount: 5 # lock user account after 5 tries; when using e.g. Fail2Ban you can deactivate the function with -1
  loginResetTimeMinutes: 120 # lock account for 2 hours after x attempts
  loginMethod: normal # Accepts values like 'all' and 'normal'(only Login with Username/Password), 'oauth2'(only Login with OAuth2) or 'saml2'(only Login with SAML2)
  initialLogin:
    username: admin # initial username for the first login
    password: changeme!1 # initial password for the first login — change after first login; login is enabled above
  oauth2:
    enabled: false # set to 'true' to enable login (Note: enableLogin must also be 'true' for this to work)
    client:
      # Provider-specific clients; the generic issuer/clientId/etc. keys below
      # the 'client' map configure a single generic OpenID Connect provider instead.
      keycloak:
        issuer: '' # URL of the Keycloak realm's OpenID Connect Discovery endpoint
        clientId: '' # client ID for Keycloak OAuth2
        clientSecret: '' # client secret for Keycloak OAuth2
        scopes: openid, profile, email # scopes for Keycloak OAuth2
        useAsUsername: preferred_username # field to use as the username for Keycloak OAuth2. Available options are: [email | name | given_name | family_name | preferred_name]
      google:
        clientId: '' # client ID for Google OAuth2
        clientSecret: '' # client secret for Google OAuth2
        scopes: https://www.googleapis.com/auth/userinfo.email, https://www.googleapis.com/auth/userinfo.profile # scopes for Google OAuth2
        useAsUsername: email # field to use as the username for Google OAuth2. Available options are: [email | name | given_name | family_name]
      github:
        clientId: '' # client ID for GitHub OAuth2
        clientSecret: '' # client secret for GitHub OAuth2
        scopes: read:user # scope for GitHub OAuth2
        useAsUsername: login # field to use as the username for GitHub OAuth2. Available options are: [email | login | name]
    issuer: '' # set to any Provider that supports OpenID Connect Discovery (/.well-known/openid-configuration) endpoint
    clientId: '' # client ID from your Provider
    clientSecret: '' # client secret from your Provider
    autoCreateUser: true # set to 'true' to allow auto-creation of non-existing users
    blockRegistration: false # set to 'true' to deny login with SSO without prior registration by an admin
    useAsUsername: email # default is 'email'; custom fields can be used as the username
    scopes: openid, profile, email # specify the scopes for which the application will request permissions
    provider: google # set this to your OAuth Provider's name, e.g., 'google' or 'keycloak'
  saml2:
    enabled: false # Only enabled for paid enterprise clients (enterpriseEdition.enabled must be true)
    provider: '' # The name of your Provider
    autoCreateUser: true # set to 'true' to allow auto-creation of non-existing users
    blockRegistration: false # set to 'true' to deny login with SSO without prior registration by an admin
    registrationId: stirling # The name of your Service Provider (SP) app name. Should match the name in the path for your SSO & SLO URLs
    idpMetadataUri: https://dev-XXXXXXXX.okta.com/app/externalKey/sso/saml/metadata # The uri for your Provider's metadata
    idpSingleLoginUrl: https://dev-XXXXXXXX.okta.com/app/dev-XXXXXXXX_stirlingpdf_1/externalKey/sso/saml # The URL for initiating SSO. Provided by your Provider
    idpSingleLogoutUrl: https://dev-XXXXXXXX.okta.com/app/dev-XXXXXXXX_stirlingpdf_1/externalKey/slo/saml # The URL for initiating SLO. Provided by your Provider
    idpIssuer: http://www.okta.com/externalKey # The ID of your Provider
    idpCert: classpath:okta.crt # The certificate your Provider will use to authenticate your app's SAML authentication requests. Provided by your Provider
    privateKey: classpath:saml-private-key.key # Your private key. Generated from your keypair
    spCert: classpath:saml-public-cert.crt # Your signing certificate. Generated from your keypair
premium:
  key: 00000000-0000-0000-0000-000000000000 # license key; the all-zero value is the unlicensed placeholder
  enabled: false # Enable license key checks for pro/enterprise features
  proFeatures:
    SSOAutoLogin: false
    CustomMetadata:
      autoUpdateMetadata: false
      author: username
      creator: Stirling-PDF
      producer: Stirling-PDF
    googleDrive:
      enabled: false
      clientId: ''
      apiKey: ''
      appId: ''
mail:
  # NOTE(review): enabled is true while host is still the smtp.example.com
  # placeholder and username/password/from are empty — confirm real SMTP
  # settings or set enabled back to false.
  enabled: true # set to 'true' to enable sending emails
  host: smtp.example.com # SMTP server hostname
  port: 587 # SMTP server port
  username: '' # SMTP server username
  password: '' # SMTP server password
  from: '' # sender email address
legal:
  termsAndConditions: https://www.stirlingpdf.com/terms-and-conditions # URL to the terms and conditions of your application (e.g. https://example.com/terms). Empty string to disable or filename to load from local file in static folder
  privacyPolicy: https://www.stirlingpdf.com/privacy-policy # URL to the privacy policy of your application (e.g. https://example.com/privacy). Empty string to disable or filename to load from local file in static folder
  accessibilityStatement: '' # URL to the accessibility statement of your application (e.g. https://example.com/accessibility). Empty string to disable or filename to load from local file in static folder
  cookiePolicy: '' # URL to the cookie policy of your application (e.g. https://example.com/cookie). Empty string to disable or filename to load from local file in static folder
  impressum: '' # URL to the impressum of your application (e.g. https://example.com/impressum). Empty string to disable or filename to load from local file in static folder
system:
  defaultLocale: en-US # set the default language (e.g. 'de-DE', 'fr-FR', etc)
  googlevisibility: false # 'true' to allow Google visibility (via robots.txt), 'false' to disallow
  enableAlphaFunctionality: false # set to enable functionality which might need more testing before it fully goes live (this feature might make no changes)
  showUpdate: false # see when a new update is available
  showUpdateOnlyAdmin: false # only admins can see when a new update is available, depending on showUpdate it must be set to 'true'
  customHTMLFiles: false # enable to have files placed in /customFiles/templates override the existing template HTML files
  tessdataDir: /usr/share/tessdata # directory containing the Tessdata (OCR language) files; Windows users should adjust this path to wherever their Tessdata files are installed
  enableAnalytics: true # set to 'true' to enable analytics, set to 'false' to disable analytics; for enterprise users, this is set to true
  enableUrlToPDF: false # Set to 'true' to enable URL to PDF, INTERNAL ONLY, known security issues, should not be used externally
  disableSanitize: false # set to true to disable Sanitize HTML; (can lead to injections in HTML)
  datasource:
    enableCustomDatabase: false # Enterprise users ONLY, set this property to 'true' if you would like to use your own custom database configuration
    customDatabaseUrl: '' # eg jdbc:postgresql://localhost:5432/postgres, set the url for your own custom database connection. If provided, the type, hostName, port and name are not necessary and will not be used
    username: postgres # set the database username
    password: postgres # set the database password
    type: postgresql # the type of the database to set (e.g. 'h2', 'postgresql')
    hostName: localhost # the host name to use for the database url. Set to 'localhost' when running the app locally. Set to match the name of the container name of your database container when running the app on a server (Docker configuration)
    port: 5432 # set the port number of the database. Ensure this matches the port the database is listening to
    name: postgres # set the name of your database. Should match the name of the database you create
  customPaths:
    pipeline:
      watchedFoldersDir: '' # Defaults to /pipeline/watchedFolders
      finishedFoldersDir: '' # Defaults to /pipeline/finishedFolders
    operations:
      weasyprint: '' # Defaults to /opt/venv/bin/weasyprint
      unoconvert: '' # Defaults to /opt/venv/bin/unoconvert
  fileUploadLimit: '' # Defaults to "". No limit when string is empty. Set a number, between 0 and 999, followed by one of the following strings to set a limit. "KB", "MB", "GB".
# Front-end branding, localization, and endpoint exposure
ui:
  appName: '' # application's visible name
  homeDescription: '' # short description or tagline shown on the homepage
  appNameNavbar: '' # name displayed on the navigation bar
  languages: [] # If empty, all languages are enabled. To display only German and Polish ["de_DE", "pl_PL"]. British English is always enabled.
endpoints:
  toRemove: [] # list endpoints to disable (e.g. ['img-to-pdf', 'remove-pages'])
  groupsToRemove: [] # list groups to disable (e.g. ['LibreOffice'])
metrics:
  enabled: false # 'true' to enable Info APIs (`/api/*`) endpoints, 'false' to disable
# Automatically Generated Settings (Do Not Edit Directly)
AutomaticallyGenerated:
  key: 241af3aa-bb38-4e14-a593-939b64d1d7a3 # written by the app per the header above; do not hand-edit
  UUID: 78ddd78f-688c-4b07-a351-edb8580f1fbd # app-generated installation identifier
  appVersion: 0.46.2 # recorded by the app; updated automatically on upgrade
processExecutor:
  sessionLimit: # Process executor instances limits
    libreOfficeSessionLimit: 1
    pdfToHtmlSessionLimit: 1
    qpdfSessionLimit: 4
    tesseractSessionLimit: 1
    pythonOpenCvSessionLimit: 8
    weasyPrintSessionLimit: 16
    installAppSessionLimit: 1
    calibreSessionLimit: 1
  timeoutMinutes: # Process executor timeout in minutes
    libreOfficetimeoutMinutes: 30
    pdfToHtmltimeoutMinutes: 20
    pythonOpenCvtimeoutMinutes: 30
    weasyPrinttimeoutMinutes: 30
    installApptimeoutMinutes: 60
    calibretimeoutMinutes: 30
    tesseractTimeoutMinutes: 30 # NOTE(review): capital 'T' differs from the sibling keys' lowercase 't' — presumably the property name the app expects; confirm before normalizing

View File

@@ -0,0 +1,13 @@
services:
  pinchflat:
    # Pinchflat YouTube downloader; media lands on the NAS share below.
    image: ghcr.io/kieraneglin/pinchflat:latest
    restart: unless-stopped
    environment:
      - TZ=America/New_York
    ports:
      - "8945:8945" # quoted to avoid YAML 1.1 base-60 surprises on port mappings
    volumes:
      - ./config:/config
      - /mnt/share/media/youtube:/downloads
    labels:
      - diun.enable=true # image watched by diun for updates

2
plex/.env.example Normal file
View File

@@ -0,0 +1,2 @@
# Get claim token from https://www.plex.tv/claim/
PLEX_CLAIM=

46
plex/docker-compose.yml Normal file
View File

@@ -0,0 +1,46 @@
services:
  plex:
    image: lscr.io/linuxserver/plex:latest
    container_name: plex
    # network_mode: host
    deploy:
      resources:
        reservations:
          devices:
            # Reserve one NVIDIA GPU for hardware transcoding
            - driver: nvidia
              count: 1
              capabilities:
                - gpu
    # Port mappings quoted so YAML never misreads colon-separated numbers
    ports:
      - "32400:32400/tcp"
      - "3005:3005/tcp"
      - "8324:8324/tcp"
      - "32469:32469/tcp"
      - "1900:1900/udp"
      - "32410:32410/udp"
      - "32412:32412/udp"
      - "32413:32413/udp"
      - "32414:32414/udp"
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
      - VERSION=docker
      - PLEX_CLAIM=${PLEX_CLAIM} # claim token from https://www.plex.tv/claim/ (see .env.example)
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,video,utility
    volumes:
      # Optional transcode scratch directory (was misplaced under 'environment',
      # where uncommenting it would be invalid — it is a volume mapping):
      # - <path/to/transcode/temp>:/transcode
      - plex_data:/config
      - /mnt/share/media/movies_kids:/movies_kids
      - /mnt/share/media/movies:/movies
      - /mnt/share/media/anime:/anime
      - /mnt/share/media/babies:/babies
      - /mnt/share/media/books_narrated:/books_narrated
      - /mnt/share/media/audiobooks:/audiobooks
      - /mnt/share/media/tv:/tv
      - /mnt/share/media/youtube:/youtube
    restart: unless-stopped
volumes:
  plex_data: null
networks: {}

14
tclip/compose.yaml Normal file
View File

@@ -0,0 +1,14 @@
name: paste
services:
  tclip:
    image: ghcr.io/tailscale-dev/tclip:latest
    volumes:
      - /var/lib/tclip:/data # pastebin data persisted on the host
    environment:
      - DATA_DIR=/data
      - TS_AUTHKEY=${TS_AUTHKEY} # supplied via the environment/.env at deploy time
      - TSNET_FORCE_LOGIN=1
    restart: always
    labels:
      - diun.enable=true # image watched by diun for updates
networks: {}

View File

@@ -0,0 +1,15 @@
services:
  uptime-kuma:
    image: louislam/uptime-kuma:latest
    container_name: uptime-kuma
    volumes:
      - uptime-kuma:/app/data # app state persisted in the external named volume below
    ports:
      - 6001:3001 # host 6001 -> container 3001
    restart: unless-stopped
    labels:
      - diun.enable=true # image watched by diun for updates
volumes:
  uptime-kuma:
    name: uptime-kuma
    external: true # volume must be created beforehand (e.g. `docker volume create uptime-kuma`)

2
vaultwarden/.env.example Normal file
View File

@@ -0,0 +1,2 @@
ADMIN_TOKEN=
DOMAIN=

12
vaultwarden/README.md Normal file
View File

@@ -0,0 +1,12 @@
# Create an environment file
```shell
touch ~/docker/vaultwarden/.env
nano ~/docker/vaultwarden/.env
```
## Add the following values
`ADMIN_TOKEN`=xyz
`DOMAIN`=https://example.tld

View File

@@ -0,0 +1,21 @@
services:
  vaultwarden:
    image: vaultwarden/server:latest
    container_name: vaultwarden
    ports:
      - 7080:80 # HTTP (matches ROCKET_PORT below)
      - 5443:443
    volumes:
      - vaultwarden_data:/data
    environment:
      - WEBSOCKET_ENABLED=true
      - ADMIN_TOKEN=${ADMIN_TOKEN} # supplied via .env (see vaultwarden/README.md)
      - DOMAIN=${DOMAIN} # public URL, e.g. https://example.tld (see vaultwarden/README.md)
      - ROCKET_PORT=80
      - ROCKET_PROFILE=release
    restart: always
    labels:
      - diun.enable=true # image watched by diun for updates
volumes:
  vaultwarden_data:
    name: vaultwarden_data

13
wiki/.env.example Normal file
View File

@@ -0,0 +1,13 @@
# ~/docker/wiki/.env
# wiki service
DB_TYPE=
DB_HOST=
DB_PORT=
DB_USER=
DB_PASS=
# postgres database service
POSTGRES_DB=
POSTGRES_PASSWORD=
POSTGRES_USER=

31
wiki/docker-compose.yml Normal file
View File

@@ -0,0 +1,31 @@
services:
  db:
    image: postgres:15-alpine
    environment:
      # All values supplied via wiki/.env (see wiki/.env.example)
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_USER: ${POSTGRES_USER}
    logging:
      driver: none # no log collection for the database container
    restart: unless-stopped
    volumes:
      - db-data:/var/lib/postgresql/data
  wiki:
    image: ghcr.io/requarks/wiki:2
    depends_on:
      - db # NOTE(review): orders startup only; does not wait for Postgres readiness
    environment:
      DB_TYPE: ${DB_TYPE}
      DB_HOST: ${DB_HOST}
      DB_PORT: ${DB_PORT}
      DB_USER: ${DB_USER}
      DB_PASS: ${DB_PASS}
      DB_NAME: ${POSTGRES_DB} # reuses the database name the db service creates
    restart: unless-stopped
    ports:
      - 8300:3000 # host 8300 -> container 3000
    labels:
      - diun.enable=true # image watched by diun for updates
volumes:
  db-data: null
networks: {}