Move stuff to various S3 stores instead of local minio (#5)

Move most things to Cloudflare R2, while gotosocial moves to local storage.

Reviewed-on: #5
Co-authored-by: Natsu Kagami <nki@nkagami.me>
Co-committed-by: Natsu Kagami <nki@nkagami.me>
Authored by Natsu Kagami on 2024-10-31 13:04:58 +00:00; committed by nki
parent 8702656b24
commit c36f5f66b1
Signed by: DTTHgit (GPG key ID: 3681E15E5C14A241)
11 changed files with 315 additions and 156 deletions
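In practice, "moving to R2" means pointing each remaining S3 consumer at Cloudflare R2's S3-compatible endpoint instead of the local MinIO instance, while gotosocial stops using S3 altogether. As a rough, purely illustrative sketch — the option names, account ID, bucket, and credentials path below are placeholders, not values from this repository — an R2-backed store boils down to something like:

# Hypothetical example only; not actual module options from this repo.
{
  storage = {
    backend = "s3";
    # R2 exposes the S3 API at https://<ACCOUNT_ID>.r2.cloudflarestorage.com
    endpoint = "https://EXAMPLE_ACCOUNT_ID.r2.cloudflarestorage.com";
    bucket = "example-bucket";
    # Keep access keys out of the Nix store, e.g. loaded from an EnvironmentFile.
    credentialsFile = "/run/secrets/r2-s3-env";
  };
}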


@@ -48,7 +48,14 @@ in
};
config = mkIf cfg.enable {
- systemd.services.arion-authentik.serviceConfig.EnvironmentFile = cfg.envFile;
+ systemd.services.arion-authentik = {
+ serviceConfig.EnvironmentFile = cfg.envFile;
+ serviceConfig.Type = "notify";
+ serviceConfig.NotifyAccess = "all";
+ script = lib.mkBefore ''
+ ${lib.getExe pkgs.wait4x} http http://127.0.0.1:${toString cfg.port} --expect-status-code 200 -t 0 -q -- systemd-notify --ready &
+ '';
+ };
virtualisation.arion.projects.authentik.settings = {
services.postgresql.service = {
image = images.postgresql;
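The new arion-authentik definition switches the unit to Type=notify and backgrounds a wait4x poller, so systemd only marks Authentik as started once its HTTP endpoint actually answers; units ordered after it (such as gotosocial below, which now pulls in arion-authentik.service) therefore start against a live identity provider. A commented restatement of the same pattern, assuming the cfg.port option defined earlier in this module:

systemd.services.arion-authentik = {
  # The unit reaches the "started" state only once systemd-notify --ready is sent.
  serviceConfig.Type = "notify";
  # Allow the backgrounded helper below (not just the main PID) to send that notification.
  serviceConfig.NotifyAccess = "all";
  script = lib.mkBefore ''
    # Poll the HTTP endpoint until it returns 200, then signal readiness to systemd.
    ${lib.getExe pkgs.wait4x} http http://127.0.0.1:${toString cfg.port} \
      --expect-status-code 200 -t 0 -q -- systemd-notify --ready &
  '';
};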


@@ -33,7 +33,7 @@ with lib; {
{
systemd.services.heisenbridge = {
description = "Matrix<->IRC bridge";
requires = [ "matrix-synapse.service" ]; # So the registration file can be used by Synapse
requires = [ "matrix-conduit-nkagami.service" "matrix-synapse.service" ]; # So the registration file can be used by Synapse
wantedBy = [ "multi-user.target" ];
serviceConfig = rec {


@@ -4,6 +4,7 @@ let
cfg = config.cloud.gotosocial;
dbUser = "gotosocial";
+ storageLocation = "/mnt/data/gotosocial";
in
{
options.cloud.gotosocial = {
@@ -74,6 +75,9 @@ in
# Media
media-emoji-remote-max-size = 256 * 1024 /* bytes */;
media-emoji-local-max-size = 256 * 1024 /* bytes */;
+ media-remote-cache-days = 7;
+ media-cleanup-from = "00:00";
+ media-cleanup-every = "24h";
# OIDC
oidc-enabled = true;
oidc-idp-name = "DTTH";
@@ -82,10 +86,22 @@ in
http-client.block-ips = [ "11.0.0.0/24" ];
# Advanced
advanced-rate-limit-requests = 0;
+ # Storage
+ storage-backend = "local";
+ storage-local-base-path = "${storageLocation}/storage";
+ # instance-inject-mastodon-version = true;
};
};
systemd.services.gotosocial.requires = mkAfter [ "minio.service" "postgresql.service" ];
systemd.services.gotosocial.after = mkAfter [ "minio.service" "postgresql.service" ];
systemd.services.gotosocial.requires = mkAfter [ "postgresql.service" "arion-authentik.service" ];
systemd.services.gotosocial.after = mkAfter [ "postgresql.service" "arion-authentik.service" ];
systemd.services.gotosocial.unitConfig = {
RequiresMountsFor = [ storageLocation ];
ReadWritePaths = [ storageLocation ];
};
systemd.tmpfiles.settings."10-gotosocial".${storageLocation}.d = {
user = dbUser;
group = dbUser;
mode = "0700";
};
};
}
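Because media now lives on a local mount instead of in MinIO, the unit gains RequiresMountsFor so systemd orders it after (and pulls in) the mount backing /mnt/data/gotosocial, and a systemd-tmpfiles rule pre-creates that directory with the right ownership. As a sketch, the tmpfiles.settings attribute set above expands to the classic tmpfiles.d(5) rule shown in the comment, assuming the storageLocation and dbUser values from this module:

# Equivalent tmpfiles.d(5) line: d /mnt/data/gotosocial 0700 gotosocial gotosocial - -
systemd.tmpfiles.settings."10-gotosocial"."/mnt/data/gotosocial".d = {
  user = "gotosocial";   # dbUser in this module
  group = "gotosocial";
  mode = "0700";         # only the gotosocial user can read or write its media
};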


@@ -0,0 +1,183 @@
commit 8c7f8c28fabc174a71499a4737579b24b5c4b244
Author: Natsu Kagami <nki@nkagami.me>
Date: Mon Oct 21 02:17:36 2024 +0200
Support R2
diff --git a/.env.sample b/.env.sample
index eb57ad85c..94ffcee07 100644
--- a/.env.sample
+++ b/.env.sample
@@ -66,6 +66,8 @@ AWS_S3_UPLOAD_BUCKET_URL=http://s3:4569
AWS_S3_UPLOAD_BUCKET_NAME=bucket_name_here
AWS_S3_FORCE_PATH_STYLE=true
AWS_S3_ACL=private
+AWS_S3_R2=true
+AWS_S3_R2_PUBLIC_URL=http://s3:4569
# AUTHENTICATION
diff --git a/app/utils/files.ts b/app/utils/files.ts
index 6607a6b12..5138f68ad 100644
--- a/app/utils/files.ts
+++ b/app/utils/files.ts
@@ -63,8 +63,13 @@ export const uploadFile = async (
xhr.addEventListener("loadend", () => {
resolve(xhr.readyState === 4 && xhr.status >= 200 && xhr.status < 400);
});
- xhr.open("POST", data.uploadUrl, true);
- xhr.send(formData);
+ xhr.open(data.method, data.uploadUrl, true);
+ xhr.setRequestHeader("Content-Type", file.type);
+ if (data.method === "POST") {
+ xhr.send(formData);
+ } else {
+ xhr.send(file);
+ }
});
if (!success) {
diff --git a/server/env.ts b/server/env.ts
index 5b420f2e1..4ea1e8d3c 100644
--- a/server/env.ts
+++ b/server/env.ts
@@ -519,6 +519,14 @@ export class Environment {
environment.AWS_S3_UPLOAD_BUCKET_NAME
);
+ @IsOptional()
+ public AWS_S3_R2 = this.toBoolean(environment.AWS_S3_R2 ?? "false");
+
+ @IsOptional()
+ public AWS_S3_R2_PUBLIC_URL = this.toOptionalString(
+ environment.AWS_S3_R2_PUBLIC_URL
+ );
+
/**
* Whether to force path style URLs for S3 objects, this is required for some
* S3-compatible storage providers.
diff --git a/server/routes/api/attachments/attachments.ts b/server/routes/api/attachments/attachments.ts
index 5e6c27594..b7620f440 100644
--- a/server/routes/api/attachments/attachments.ts
+++ b/server/routes/api/attachments/attachments.ts
@@ -3,6 +3,7 @@ import { v4 as uuidv4 } from "uuid";
import { AttachmentPreset } from "@shared/types";
import { bytesToHumanReadable } from "@shared/utils/files";
import { AttachmentValidation } from "@shared/validations";
+import env from "@server/env";
import { AuthorizationError, ValidationError } from "@server/errors";
import auth from "@server/middlewares/authentication";
import { rateLimiter } from "@server/middlewares/rateLimiter";
@@ -90,16 +91,30 @@ router.post(
{ transaction }
);
- const presignedPost = await FileStorage.getPresignedPost(
- key,
- acl,
- maxUploadSize,
- contentType
- );
+ let uploadUrl;
+ let method;
+ let presignedPost = {
+ fields: {},
+ };
+ if (env.AWS_S3_R2) {
+ uploadUrl = await FileStorage.getPresignedPut(key);
+ method = "PUT";
+ } else {
+ uploadUrl = FileStorage.getUploadUrl();
+ method = "POST";
+
+ presignedPost = await FileStorage.getPresignedPost(
+ key,
+ acl,
+ maxUploadSize,
+ contentType
+ );
+ }
ctx.body = {
data: {
- uploadUrl: FileStorage.getUploadUrl(),
+ uploadUrl,
+ method,
form: {
"Cache-Control": "max-age=31557600",
"Content-Type": contentType,
diff --git a/server/storage/files/BaseStorage.ts b/server/storage/files/BaseStorage.ts
index ce0287ebc..a1931c83d 100644
--- a/server/storage/files/BaseStorage.ts
+++ b/server/storage/files/BaseStorage.ts
@@ -26,6 +26,8 @@ export default abstract class BaseStorage {
contentType: string
): Promise<Partial<PresignedPost>>;
+ public abstract getPresignedPut(key: string): Promise<string>;
+
/**
* Returns a promise that resolves with a stream for reading a file from the storage provider.
*
diff --git a/server/storage/files/LocalStorage.ts b/server/storage/files/LocalStorage.ts
index 83cf98c50..324e60dd9 100644
--- a/server/storage/files/LocalStorage.ts
+++ b/server/storage/files/LocalStorage.ts
@@ -30,6 +30,10 @@ export default class LocalStorage extends BaseStorage {
});
}
+ public async getPresignedPut(key: string) {
+ return this.getUrlForKey(key);
+ }
+
public getUploadUrl() {
return "/api/files.create";
}
diff --git a/server/storage/files/S3Storage.ts b/server/storage/files/S3Storage.ts
index a42442e0c..d55ef5472 100644
--- a/server/storage/files/S3Storage.ts
+++ b/server/storage/files/S3Storage.ts
@@ -4,6 +4,7 @@ import {
S3Client,
DeleteObjectCommand,
GetObjectCommand,
+ PutObjectCommand,
ObjectCannedACL,
} from "@aws-sdk/client-s3";
import { Upload } from "@aws-sdk/lib-storage";
@@ -58,6 +59,16 @@ export default class S3Storage extends BaseStorage {
return createPresignedPost(this.client, params);
}
+ public async getPresignedPut(key: string) {
+ const params = {
+ Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
+ Key: key,
+ };
+
+ const command = new PutObjectCommand(params);
+ return await getSignedUrl(this.client, command, { expiresIn: 3600 });
+ }
+
private getPublicEndpoint(isServerUpload?: boolean) {
if (env.AWS_S3_ACCELERATE_URL) {
return env.AWS_S3_ACCELERATE_URL;
@@ -137,10 +148,17 @@ export default class S3Storage extends BaseStorage {
);
}
+ public getR2ObjectUrl = async (key: string) =>
+ env.AWS_S3_R2_PUBLIC_URL + "/" + key;
+
public getSignedUrl = async (
key: string,
expiresIn = S3Storage.defaultSignedUrlExpires
) => {
+ if (env.AWS_S3_R2) {
+ return this.getR2ObjectUrl(key);
+ }
+
const isDocker = env.AWS_S3_UPLOAD_BUCKET_URL.match(/http:\/\/s3:/);
const params = {
Bucket: this.getBucket(),