# Base Initialization

Copy and paste the code snippets from each section below into your `db` folder (aliased to `#`).
## Helpers

```ts
// #/helpers/consts.ts
export const TIMESTAMPTZ_CONFIG = { precision: 3, withTimezone: true } as const;
```

Why use `{ precision: 3, withTimezone: true }`?
`withTimezone: true`: Postgres stores a `TIMESTAMP WITH TIME ZONE` as a UTC instant, converting the input using its timezone offset. It can be thought of as a moment in time. A `TIMESTAMP WITHOUT TIME ZONE`, by contrast, ignores any timezone indication on the input. It's like reading the time on a wall clock. It's generally considered best practice to always use timestamptz.
If you can guarantee that all of your server and database instances are in the same timezone, then there probably won't be any practical difference between timestamp and timestamptz. However, there is no real advantage to using timestamp; both types occupy 8 bytes. That's free real estate.
`precision: 3`: The JavaScript `Date` object only supports millisecond resolution (3), so the default Postgres precision of microseconds (6) is more resolution than most apps will ever use. The timestamptz type uses 8 bytes regardless of precision, but rounding may lead to smaller backups after compression. Your mileage may vary.
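For illustration, the millisecond ceiling is visible in the `Date` API itself (a standalone snippet, not part of the db code):

```ts
// toISOString() emits at most 3 fractional digits -- a JS Date cannot
// represent the microsecond precision that Postgres stores by default.
const now = new Date();
console.log(now.toISOString()); // e.g. "2024-01-15T12:34:56.789Z"
```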
```ts
// #/helpers/cols.ts
import { bigint, timestamp as pgTimestamp, uuid } from "drizzle-orm/pg-core";

import { TIMESTAMPTZ_CONFIG } from "./consts";

// === Ids ===
export const id = bigint("id", { mode: "bigint" }).primaryKey();
export const randomId = uuid("id").primaryKey();

// === Timestamps ===
export const timestamp = pgTimestamp("timestamp", TIMESTAMPTZ_CONFIG).notNull();
export const createdAt = pgTimestamp("created_at", TIMESTAMPTZ_CONFIG).notNull();
export const updatedAt = pgTimestamp("updated_at", TIMESTAMPTZ_CONFIG);
export const archivedAt = pgTimestamp("archived_at", TIMESTAMPTZ_CONFIG)
  .notNull()
  .defaultNow();
```

```ts
// #/helpers/funcs.ts
import { eq } from "drizzle-orm";
import type {
  PreparedQueryConfig,
  PgPreparedQuery,
  PgTable,
  PgColumn,
} from "drizzle-orm/pg-core";
import * as v from "valibot";

import { db } from "#/index";

import type { QueryExecutor, Tx } from "./types";
type StmtParams = [qx: QueryExecutor, label?: string];

/**
 * Convenience function to reduce boilerplate for writing prepared statements.
 *
 * THIS DOES NOT RETURN A STATEMENT DIRECTLY. It returns a function that returns a statement.
 *
 * This allows a single definition for the statement to be run on the database normally OR as part of a transaction.
 *
 * Useful for enforcing the shape of stmt functions used as params for {@link createQueryFn}
 */
export function createStmtFn<T extends PgPreparedQuery<PreparedQueryConfig>>(
  fn: (...[qx, label]: StmtParams) => T,
) {
  return (...[qx, label = "default"]: StmtParams) => fn(qx, label);
}
type GenericValidationError = v.ErrorMessage<v.BaseIssue<unknown>> | undefined;

type GenericObjectValidationSchema = v.ObjectSchema<
  v.ObjectEntries,
  GenericValidationError
>;

/**
 * Convenience function to reduce boilerplate for writing functions that query the database.
 *
 * The function returned automatically handles typing for the input based on the validation schema and uses a transaction if one is provided.
 */
export function createQueryFn<
  VSchema extends
    | GenericObjectValidationSchema
    | v.VariantSchema<
        string,
        GenericObjectValidationSchema[],
        GenericValidationError
      >,
  Stmt extends PgPreparedQuery<PreparedQueryConfig>,
>({
  vSchema,
  stmtFn,
  defaultStmt,
}: {
  vSchema: VSchema;
  stmtFn: ReturnType<typeof createStmtFn<Stmt>>;
  defaultStmt?: ReturnType<ReturnType<typeof createStmtFn<Stmt>>>;
}) {
  return async (
    input: v.InferInput<typeof vSchema>,
    viaTx?: { tx: Tx; label: string },
  ) => {
    const stmt = viaTx
      ? stmtFn(viaTx.tx, viaTx.label)
      : (defaultStmt ?? stmtFn(db));

    const data = await stmt.execute(v.parse(vSchema, input));

    return data as Awaited<ReturnType<Stmt["execute"]>>;
  };
}
/**
 * Convenience function to reduce boilerplate for writing functions that archive rows for a given table
 *
 * Creates a transaction that confirms the row(s) exist, appends the existing data to an archive table, processes given cascades, and finally removes the row(s)
 *
 * The function returned automatically handles typing for the input based on the select function provided and
 * can extend an external transaction to the query if necessary
 */
export function createArchiveFn<T extends Record<string, string | bigint>>({
  selectFn,
  activeTable,
  archiveTable,
  selectBy = "id",
  cascades,
}: {
  selectFn: (input: T, viaTx?: { tx: Tx; label: string }) => Promise<unknown>;
  activeTable: PgTable & Record<keyof T, PgColumn>;
  archiveTable: PgTable;
  selectBy?: keyof T;
  cascades?: (tx: Tx, selector: T[keyof T]) => Promise<unknown>[];
}) {
  return (input: Parameters<typeof selectFn>[0], externalTx?: Tx) => {
    const _archive = async (tx: Tx) => {
      // Find the existing row data
      const rowData = (await selectFn(input, {
        tx,
        label: "archive",
      })) as Record<string, unknown>[];

      // Throw an error if data isn't found
      if (!rowData.length)
        throw new Error(
          `ARCHIVE ERROR: CANNOT FIND ROW WHERE ${String(selectBy)} = ${input[selectBy]}`,
        );

      const selector = input[selectBy];

      // Insert the data into the archive table
      await tx
        .insert(archiveTable)
        .values(rowData.map((row) => ({ ...row, [selectBy]: selector })));

      // Process cascades concurrently
      if (cascades) await Promise.all(cascades(tx, selector));

      // Delete the row data from the active table
      await tx.delete(activeTable).where(eq(activeTable[selectBy], selector));
    };

    // Process with an external transaction if it exists
    if (externalTx) return _archive(externalTx);
    // Process with a newly generated transaction
    return db.transaction((tx) => _archive(tx));
  };
}
```
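None of the code on this page calls `createArchiveFn` directly (the users and activities models below ship hand-written archive functions), so here is a hedged sketch of how it could wire up the users model defined in the Models section; `archiveUserViaHelper` is hypothetical:

```ts
import { eq } from "drizzle-orm";

import { createArchiveFn } from "#/helpers/funcs";
import { activities } from "#/models/activities/schemas";
import { select as selectUser } from "#/models/users/queries";
import { users, usersArchive } from "#/models/users/schemas";

// Hypothetical: roughly equivalent to the hand-written archiveUser below,
// except that it runs on `db` rather than `txDb`
export const archiveUserViaHelper = createArchiveFn({
  selectFn: selectUser,
  activeTable: users,
  archiveTable: usersArchive,
  cascades: (tx, selector) => [
    // Keep the archived user's activities reachable
    tx
      .update(activities)
      .set({ usersArchiveId: selector, userId: null })
      .where(eq(activities.userId, selector)),
  ],
});
```

Called as `archiveUserViaHelper({ id })` it opens its own transaction; passing an existing `Tx` as the second argument joins that transaction instead.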
```ts
// #/helpers/types.ts
import { type db } from "#/index";

/** Drizzle Postgres Transaction object type alias for convenience */
export type Tx = Parameters<Parameters<typeof db.transaction>[0]>[0];

/** Reference for running a query with the normal db object or a transaction object */
export type QueryExecutor = typeof db | Tx;

/** Basic type for data returned by ops functions */
type OpsData = Record<string, unknown>;

/** Consistent return type for ops functions */
export type OpsSafeReturn<
  D extends OpsData = OpsData,
  M extends string = string,
> =
  | {
      success: true;
      data: D;
      message?: never;
    }
  | { success: false; data?: never; message?: M };
```
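A quick sketch of how consumers narrow the union (the `result` value is hypothetical):

```ts
import type { OpsSafeReturn } from "#/helpers/types";

declare const result: OpsSafeReturn<{ id: string }, "User not found">;

if (result.success) {
  // `data` is guaranteed here and `message` is unavailable
  console.log(result.data.id);
} else {
  // `message` may be present here and `data` is unavailable
  console.log(result.message ?? "Unknown failure");
}
```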
## Models

Users are the foundation of the wunshot platform.

Most other modules will rely on the data in the users table and may require modifying it with additional columns.
The code on this page only provides the primitive structure. There will not be any real functionality until features from other modules are added.
### users
| Column | Type | Description |
|---|---|---|
| id | uuid | The user’s unique id |
| created_at | timestamptz | The time the user was created |
| updated_at | timestamptz | The time the user was last updated |
Why use a uuid as the primary key?
A sequential id would be faster and take less space. However, sequential ids open a potential attack vector: a bad actor can enumerate them to guess the ids of specific users.
Using Postgres’s built-in functionality has some advantages over third-party random id generators:
- any application that connects to the database will automatically use the same random id generator
- the uuid format can be enforced by the database
- uuidv4 is ubiquitous, so any connected applications will likely be able to generate ids on-the-fly if they need to
Overall, it’s a good compromise between performance, security, and consistency.
### usersArchive
All of the columns from the users table are included in usersArchive.
Plus:
| Column | Type | Description |
|---|---|---|
| archived_at | timestamptz | The time the user was archived |
```ts
// #/models/users/schemas.ts
import { pgTable } from "drizzle-orm/pg-core";

import { archivedAt, createdAt, randomId, updatedAt } from "#/helpers/cols";

export const users = pgTable("users", {
  id: randomId.defaultRandom(),
  createdAt: createdAt.defaultNow(),
  updatedAt,
});

export const usersArchive = pgTable("users_archive", {
  id: randomId,
  createdAt,
  updatedAt,
  archivedAt,
});
```

```ts
// #/models/users/validations.ts
import { createSelectSchema } from "drizzle-valibot";
import * as v from "valibot";
import { users } from "./schemas";
//// PRIMITIVES ////
const { id } = createSelectSchema(users).entries;
//// INSERT ////
export const Insert = v.object({});
//// SELECT ////
export const Select = v.object({ id });import { eq, getTableColumns, sql } from "drizzle-orm";
import { users } from "./schemas";import * as V from "./validations";import { createQueryFn, createStmtFn } from "#/helpers/funcs";
//// PRIMITIVES ////
const { id } = getTableColumns(users);
//// INSERT ////
const insertStmt = createStmtFn((qx, label) => qx .insert(users) .values({}) .returning() .prepare(`user_insert_${label}`),);
export const insert = createQueryFn({ vSchema: V.Insert, stmtFn: insertStmt });
//// SELECT ////
const selectStmt = createStmtFn((qx, label) => qx .select() .from(users) .where(eq(id, sql.placeholder("id"))) .limit(1) .prepare(`user_select_${label}`),);
export const select = createQueryFn({ vSchema: V.Select, stmtFn: selectStmt });import { eq } from "drizzle-orm";
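The exported functions are then called like ordinary async functions; a minimal sketch (top-level `await` assumed):

```ts
import { insert, select } from "#/models/users/queries";

// Validates {} against Insert, runs the prepared statement, returns the new row(s)
const [newUser] = await insert({});

// Validates { id } against Select and returns at most one row
if (newUser) {
  const [found] = await select({ id: newUser.id });
  console.log(found?.createdAt);
}
```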
```ts
// #/models/users/archive.ts (filename assumed)
import { eq } from "drizzle-orm";

import { txDb } from "#/index";
import { type Tx } from "#/helpers/types";
import { activities } from "#/models/activities/schemas";

import * as queries from "./queries";
import { users, usersArchive } from "./schemas";

export const USER_NOT_FOUND = "User not found";

/** Adds given id to the usersArchive, handles related cascades, and removes it from users */
export function archiveUser(
  input: Parameters<typeof queries.select>[0],
  externalTx?: Tx,
) {
  const _archiveUser = async (tx: Tx) => {
    /** Find the existing row data */
    const [rowData] = await queries.select(input, { tx, label: "archive" });

    /** Throw a named error if no user is found */
    if (!rowData) throw new Error(USER_NOT_FOUND);

    const { id } = input;

    /** Insert the user data into the archive table */
    await tx.insert(usersArchive).values({ ...rowData, id });

    /** Process cascades concurrently */
    await Promise.all([
      /** Update related tables */
      tx
        .update(activities)
        .set({ usersArchiveId: id, userId: null })
        .where(eq(activities.userId, id)),
    ]);

    /** Delete the row data from the users table */
    await tx.delete(users).where(eq(users.id, input.id));
  };

  /** Process with an external transaction if it exists */
  if (externalTx) return _archiveUser(externalTx);
  /** Process with a newly generated transaction */
  return txDb.transaction((tx) => _archiveUser(tx));
}
```
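Nothing on this page consumes `OpsSafeReturn` yet; as a hedged sketch, an ops-level wrapper (`removeUser` is hypothetical, and the archive import path is assumed) could surface `USER_NOT_FOUND` like this:

```ts
import type { OpsSafeReturn } from "#/helpers/types";
import { archiveUser, USER_NOT_FOUND } from "#/models/users/archive"; // assumed path

// Hypothetical ops function -- not part of the base code
export async function removeUser(input: {
  id: string;
}): Promise<OpsSafeReturn<{ id: string }>> {
  try {
    await archiveUser(input);
    return { success: true, data: { id: input.id } };
  } catch (error) {
    if (error instanceof Error && error.message === USER_NOT_FOUND)
      return { success: false, message: USER_NOT_FOUND };
    throw error; // let unexpected failures bubble up to the caller's logger
  }
}
```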
## Activities

The activities table essentially acts as a basic audit trail within wunshot.
The main reason this model is included at a base level is to provide a standard unified way to record errors.
If you don’t need or want that functionality, you can skip it. But beware: other modules will assume it exists, and you will need to remove their references to it.
### activities
| Column | Type | Description |
|---|---|---|
| id | bigint | The activity’s unique id |
| user_id | uuid | The user who performed the activity |
| users_archive_id | uuid | If a user is archived, don’t lose the reference |
| label | text | A text flag used to identify the activity. This could be an enum, but that becomes difficult to manage as ops functions are added |
| success | boolean | Whether the activity succeeded or failed |
| failure_cause | text | If the activity failed, this will contain the reason |
| meta | jsonb | Any additional metadata about the activity |
| timestamp | timestamptz | The time the activity was created. Activities act like a log, so it doesn’t make sense to have created_at and updated_at |
### activitiesArchive
All of the columns from the activities table are included in activitiesArchive.
Plus:
| Column | Type | Description |
|---|---|---|
| archived_at | timestamptz | The time the activity was archived |
```ts
// #/models/activities/schemas.ts
import {
  boolean,
  index,
  jsonb,
  pgTable,
  text,
  uuid,
} from "drizzle-orm/pg-core";

import { id, timestamp, archivedAt } from "#/helpers/cols";
import { users, usersArchive } from "#/models/users/schemas";

const activitiesBaseCols = {
  userId: uuid("user_id").references(() => users.id),
  usersArchiveId: uuid("users_archive_id").references(() => usersArchive.id),
  label: text("label").notNull(), // Could be an enum, but it gets difficult to manage as ops are added
  success: boolean("success").notNull(),
  failureCause: text("failure_cause"), // Could be an enum, but it gets difficult to manage as ops are added
  meta: jsonb("meta"),
};

export const activities = pgTable(
  "activities",
  {
    ...activitiesBaseCols,
    id: id.generatedAlwaysAsIdentity(),
    timestamp: timestamp.defaultNow(),
  },
  (table) => [index().on(table.timestamp)],
);

/**
 * Used to preserve relations while keeping the activities table small and efficient
 * @todo set up a cron job to move old activities to the archive automatically
 */
export const activitiesArchive = pgTable("activities_archive", {
  ...activitiesBaseCols,
  id,
  timestamp,
  archivedAt,
});
```

```ts
// #/models/activities/validations.ts
import { createInsertSchema, createSelectSchema } from "drizzle-valibot";
import * as v from "valibot";
import { activities } from "./schemas";

//// PRIMITIVES ////
const { id } = createSelectSchema(activities).entries;
const { label, failureCause, meta, userId } =
  createInsertSchema(activities).entries;

//// INSERT ////
const insertBaseVariant = {
  label,
  userId: v.nullish(v.unwrap(userId), null),
  meta: v.nullish(v.unwrap(meta), null),
};

export const Insert = v.variant("success", [
  v.object({
    ...insertBaseVariant,
    success: v.literal(true),
    failureCause: v.nullish(v.null(), null),
  }),
  /**
   * You could use another tier of variant schemas keying on the failureCause here.
   * That's how it was originally written, but it became complicated to update/maintain
   **/
  v.object({
    ...insertBaseVariant,
    success: v.literal(false),
    failureCause: v.nonNullish(failureCause),
  }),
]);
//// SELECT ////
export const Select = v.object({ id });
```
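The variant schema, rather than every caller, enforces the success/failure pairing. For instance (label values are illustrative):

```ts
import * as v from "valibot";

import { Insert } from "#/models/activities/validations";

// OK: a failed activity must carry a failureCause
v.parse(Insert, { label: "user_insert", success: false, failureCause: "UNKNOWN" });

// OK: userId and meta default to null; failureCause defaults to null on success
v.parse(Insert, { label: "user_insert", success: true });

// Throws: a successful activity may not carry a failureCause
// v.parse(Insert, { label: "user_insert", success: true, failureCause: "UNKNOWN" });
```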
```ts
// #/models/activities/queries.ts
import { eq, getTableColumns, sql } from "drizzle-orm";

import { createQueryFn, createStmtFn } from "#/helpers/funcs";

import { activities } from "./schemas";
import * as V from "./validations";
//// PRIMITIVES ////
const {
  id,
  failureCause,
  label: labelCol,
  meta,
  success,
  userId,
  usersArchiveId,
  timestamp,
} = getTableColumns(activities);
//// INSERT ////
const insertStmt = createStmtFn((qx, label) =>
  qx
    .insert(activities)
    .values({
      userId: sql.placeholder("userId"),
      success: sql.placeholder("success"),
      label: sql.placeholder("label"),
      failureCause: sql.placeholder("failureCause"),
      meta: sql.placeholder("meta"),
    })
    .returning({ id })
    .prepare(`activity_insert_${label}`),
);
export const insert = createQueryFn({ vSchema: V.Insert, stmtFn: insertStmt });
//// SELECT ////
const selectStmt = createStmtFn((qx, label) =>
  qx
    .select({
      failureCause,
      label: labelCol,
      meta,
      success,
      userId,
      usersArchiveId,
      timestamp,
    })
    .from(activities)
    .where(eq(id, sql.placeholder("id")))
    .limit(1)
    .prepare(`activity_select_${label}`),
);
export const select = createQueryFn({ vSchema: V.Select, stmtFn: selectStmt });
```
```ts
// #/models/activities/archive.ts (filename assumed)
import { eq } from "drizzle-orm";

import { txDb } from "#/index";
import { type Tx } from "#/helpers/types";

import * as queries from "./queries";
import { activities, activitiesArchive } from "./schemas";

export const ACTIVITY_NOT_FOUND = "Activity not found";

/** Adds given id to the activitiesArchive, handles related cascades, and removes it from activities */
export function archiveActivity(
  input: Parameters<typeof queries.select>[0],
  externalTx?: Tx,
) {
  const _archiveActivity = async (tx: Tx) => {
    /** Find the existing row data */
    const [rowData] = await queries.select(input, {
      tx,
      label: "archive",
    });

    /** Throw a named error if data isn't found */
    if (!rowData) throw new Error(ACTIVITY_NOT_FOUND);

    const { id } = input;

    /** Insert the data into the archive table */
    await tx.insert(activitiesArchive).values({ ...rowData, id });

    /** Process cascades concurrently */
    // await Promise.all([
    //   /** Update related tables */
    // ]);

    /** Delete the row data from the live table */
    await tx.delete(activities).where(eq(activities.id, id));
  };

  /** Process with an external transaction if it exists */
  if (externalTx) return _archiveActivity(externalTx);

  /** Process with a newly generated transaction */
  return txDb.transaction((tx) => _archiveActivity(tx));
}
```

```ts
// #/models/activities/loggers.ts (filename assumed)
import { serializeError } from "serialize-error";
import { type InferInput } from "valibot";
import { insert as insertActivity } from "#/models/activities/queries";
import { type Insert as InsertActivity } from "#/models/activities/validations";

type InsertActivityInput = InferInput<typeof InsertActivity>;

/**
 * Extends error to include an activity property.
 * It is intended to be processed by {@link logFailedActivity} after being caught
 */
export class ActivityError<M extends string> extends Error {
  readonly activity;
  declare readonly message: M;
  constructor({
    activity,
    message = "Something went wrong" as M,
  }: {
    activity: Omit<
      Exclude<InsertActivityInput, { success: true }>,
      "label" | "success"
    >;
    message?: M;
  }) {
    super(message, { cause: activity.failureCause });
    this.name = "ActivityError";
    this.activity = activity;
  }
}

/** Prints an exception to the console with some basic style formatting */
function printException({
  error,
  label = "",
}: {
  error: unknown;
  label?: string;
}) {
  console.group(`\x1b[31m=== ${label ? `${label} ` : ""}EXCEPTION ===\x1b[0m`);
  console.error(error);
  console.groupEnd();
}

/** Simple wrapper of {@link insertActivity} for modules to build on top of */
function logActivity(input: Parameters<typeof insertActivity>[0]) {
  return insertActivity(input);
}

/**
 * Logs a given error as a failed activity.
 * Automatically formats unknown errors for database entry.
 */
async function logFailedActivity({
  error,
  ...passThrough
}: {
  error: unknown;
  label: InsertActivityInput["label"];
  ipAddress: string;
}) {
  try {
    if (error instanceof ActivityError) {
      if (process.env.NODE_ENV === "development")
        printException({ error, label: "ACTIVITY" });
      return await logActivity({
        ...error.activity,
        ...passThrough,
        success: false,
      } as InsertActivityInput);
    } else {
      if (process.env.NODE_ENV === "development")
        printException({ error, label: "UNKNOWN" });
      // eslint-disable-next-line @typescript-eslint/no-unused-vars
      const { stack, ...errorProps } = serializeError(error) as {
        [x: string]: unknown;
      };
      return await logActivity({
        ...passThrough,
        success: false,
        failureCause: "UNKNOWN",
        meta: errorProps,
      });
    }
  } catch (error) {
    /**
     * DANGER: NOT SUITED FOR PRODUCTION. UNHANDLED EXCEPTIONS SHOULD BE REPORTED/STORED SOMEWHERE
     * If adding to activities_log fails, print the error to the console
     * this is a last-ditch attempt to capture an exception
     */
    printException({ error, label: "UNHANDLED" });
  }
}

/** Logs given activity as a success */
async function logSuccessfulActivity(
  input: Omit<Extract<InsertActivityInput, { success: true }>, "success">,
) {
  try {
    return await logActivity({
      ...input,
      success: true,
    });
  } catch (error) {
    /**
     * DANGER: NOT SUITED FOR PRODUCTION. UNHANDLED EXCEPTIONS SHOULD BE REPORTED/STORED SOMEWHERE
     * If adding to activities_log fails, print the error to the console
     * this is a last-ditch attempt to capture an exception
     */
    printException({ error, label: "UNHANDLED" });
  }
}

/** Apply a given label to {@link logSuccessfulActivity} and {@link logFailedActivity} */
export function initializeLoggers({ label }: { label: string }) {
  return {
    logSuccess: (
      input: Omit<Parameters<typeof logSuccessfulActivity>[0], "label">,
    ) => logSuccessfulActivity({ ...input, label }),
    logFailure: (
      input: Omit<Parameters<typeof logFailedActivity>[0], "label">,
    ) => logFailedActivity({ ...input, label }),
  };
}
```
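To tie the pieces together, a hedged sketch of a module-level ops function (the import path, label, and `getUserOp` are all illustrative):

```ts
import { ActivityError, initializeLoggers } from "#/models/activities/loggers"; // assumed path
import * as queries from "#/models/users/queries";

const { logSuccess, logFailure } = initializeLoggers({ label: "user_select" });

// Hypothetical ops function -- not part of the base code
export async function getUserOp({
  id,
  ipAddress,
}: {
  id: string;
  ipAddress: string;
}) {
  try {
    const [user] = await queries.select({ id });

    if (!user)
      throw new ActivityError({
        activity: { failureCause: "NOT_FOUND", meta: { id } },
        message: "User not found",
      });

    await logSuccess({ userId: user.id });
    return { success: true as const, data: user };
  } catch (error) {
    await logFailure({ error, ipAddress });
    return { success: false as const, message: "Something went wrong" };
  }
}
```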
## Generate and apply migrations

Generate .sql files based on the schema with:

```sh
npm run db:generate
```

Once the migrations are generated, you can apply them to your database with:

```sh
npm run db:migrate
```
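These scripts aren't defined on this page. Assuming the standard drizzle-kit CLI, a minimal sketch of the config might look like this (paths are illustrative), with `db:generate` mapping to `drizzle-kit generate` and `db:migrate` to `drizzle-kit migrate` in package.json:

```ts
// drizzle.config.ts -- assumed setup, not shown on this page
import { defineConfig } from "drizzle-kit";

export default defineConfig({
  dialect: "postgresql",
  // Point drizzle-kit at the schema files; adjust the glob to your layout
  schema: "./src/db/**/schemas.ts",
  out: "./drizzle",
  dbCredentials: { url: process.env.DATABASE_URL! },
});
```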