Database schema, seed data, and scripts for seeding, setup, and teardown

This commit is contained in:
2022-12-11 18:15:47 +01:00
parent 5f42163dbe
commit 099c3a59ac
22 changed files with 1203 additions and 0 deletions

15
src/database/index.ts Normal file
View File

@@ -0,0 +1,15 @@
import Configuration from "../config/configuration";
import PostgresDatabase from "./postgres";
// Build the single shared database handle from the app configuration.
const configuration = Configuration.getInstance();
const user = configuration.get("database", "user");
const password = configuration.get("database", "password");
const host = configuration.get("database", "host");
const dbName = configuration.get("database", "database");

// Fix: use `const` (the handle is never reassigned) and handle the
// connect() promise so a startup connection failure is logged instead of
// becoming an unhandled rejection.
const postgresDB = new PostgresDatabase(user, password, host, dbName);
postgresDB.connect().catch((err) => {
  console.error("Failed to connect to Postgres:", err);
});

export default postgresDB;

156
src/database/postgres.ts Normal file
View File

@@ -0,0 +1,156 @@
import logger from "../logger";
import fs from "fs";
import path from "path";
import { Pool } from "pg";
/**
 * Thin promise-based wrapper around a node-postgres connection pool.
 * Each helper logs the statement, parameter values and duration via
 * logger.debug, and logs + rethrows on failure.
 */
class PostgresDatabase {
  user: string;
  password: string;
  host: string;
  database: string;
  // Typed pool (was `any`); every query helper goes through this pool.
  pool: Pool;

  /**
   * @param user database role name
   * @param password role password (empty string allows trust/peer auth)
   * @param host database server host
   * @param database database name to connect to
   * @param port server port — generalized; previously hard-coded to 5432
   */
  constructor(
    user: string,
    password: string = "",
    host: string,
    database: string,
    port: number = 5432
  ) {
    this.user = user;
    this.password = password;
    this.host = host;
    this.database = database;
    this.pool = new Pool({
      user,
      password,
      host,
      database,
      port,
      application_name: "Node-Postgres - Application <planetposen-backend>",
    });
    // NOTE(review): if executed queries are ever buffered in-process for
    // debugging, make sure that buffer is bounded so it cannot overflow.
  }

  /** Acquire a client from the pool; also verifies connectivity at startup. */
  connect() {
    return this.pool.connect();
  }

  /**
   * Run a SQL statement and resolve with the affected/returned row count.
   * (Docstring fixed: the old comment claimed this "retrieves one row",
   * but it has always resolved with `rowCount`.)
   * @param sql SQL query text
   * @param values positional parameters ($1, $2, …); pg's API takes any[]
   * @returns Promise resolving to the row count
   */
  query(sql: string, values?: any[] | null) {
    const start = Date.now();
    return this.pool
      .query(sql, values ?? undefined)
      .then((res) => {
        const duration = Date.now() - start;
        logger.debug("Executed query", {
          sql,
          values,
          duration,
          rows: res.rowCount,
        });
        return res.rowCount;
      })
      .catch((err) => {
        logger.error("DB query error:", err);
        throw err;
      });
  }

  /**
   * Run an UPDATE/DELETE-style statement.
   * @param sql SQL query text
   * @param values positional parameters ($1, $2, …)
   * @returns Promise resolving to true when at least one row was affected
   */
  update(sql: string, values?: any[] | null) {
    const start = Date.now();
    return this.pool
      .query(sql, values ?? undefined)
      .then((res) => {
        const duration = Date.now() - start;
        logger.debug("Executed update query", {
          sql,
          values,
          duration,
          rows: res.rowCount,
        });
        // Simplified from if/else — identical truth value.
        return res.rowCount > 0;
      })
      .catch((err) => {
        logger.error("DB update error:", err);
        throw err;
      });
  }

  /**
   * Run a SQL query and resolve with every matching row.
   * @param sql SQL query text
   * @param values positional parameters ($1, $2, …)
   * @returns Promise resolving to the array of rows
   */
  all(sql: string, values?: any[] | null) {
    const start = Date.now();
    return this.pool
      .query(sql, values ?? undefined)
      .then((res) => {
        const duration = Date.now() - start;
        logger.debug("Executed all query", {
          sql,
          values,
          duration,
          rows: res.rows.length,
        });
        return res.rows;
      })
      .catch((err) => {
        logger.error("DB all error:", err);
        throw err;
      });
  }

  /**
   * Run a SQL query and resolve with the first row only.
   * @param sql SQL query text
   * @param values positional parameters ($1, $2, …)
   * @returns Promise resolving to the first row, or undefined when no rows match
   */
  get(sql: string, values?: any[] | null) {
    const start = Date.now();
    return this.pool
      .query(sql, values ?? undefined)
      .then((res) => {
        const duration = Date.now() - start;
        logger.debug("Executed get query", {
          sql,
          values,
          duration,
          rows: res.rows.length,
        });
        return res.rows[0];
      })
      .catch((err) => {
        logger.error("DB get error:", err);
        throw err;
      });
  }
}

View File

@@ -0,0 +1,2 @@
-- Shared sequence available for id generation.
CREATE SEQUENCE IF NOT EXISTS "id_sequence";

View File

@@ -0,0 +1,5 @@
-- Bookkeeping: one row per schema-migration file that has been applied.
CREATE TABLE IF NOT EXISTS migration (
  migration_id serial PRIMARY KEY,
  filename text,
  run_date timestamp DEFAULT CURRENT_TIMESTAMP
);

View File

@@ -0,0 +1,5 @@
-- Bookkeeping: one row per seed file that has been applied
-- (checked by the seeding script before re-running a file).
CREATE TABLE IF NOT EXISTS seed (
  seed_id serial PRIMARY KEY,
  filename text,
  run_date timestamp DEFAULT CURRENT_TIMESTAMP
);

View File

@@ -0,0 +1,32 @@
-- Products and their purchasable variants (SKUs).
CREATE TABLE IF NOT EXISTS product (
  product_no serial PRIMARY KEY,
  name text,
  description text,
  image text,
  subtext text,
  primary_color text,
  created timestamp DEFAULT CURRENT_TIMESTAMP,
  updated timestamp DEFAULT CURRENT_TIMESTAMP
);

-- A SKU is one sellable variant (size / price / stock) of a product.
CREATE TABLE IF NOT EXISTS product_sku (
  sku_id serial PRIMARY KEY,
  product_no integer REFERENCES product,
  price integer,
  size text,
  stock real,
  default_price boolean DEFAULT FALSE,
  created timestamp DEFAULT CURRENT_TIMESTAMP,
  updated timestamp DEFAULT CURRENT_TIMESTAMP
);

-- Every product joined with each of its SKUs.
CREATE OR REPLACE VIEW product_info AS
  SELECT product.product_no, product_sku.sku_id, name, image, description, subtext, primary_color, price, size, stock, default_price
  FROM product
  INNER JOIN product_sku
    ON product.product_no = product_sku.product_no;

-- Subset of product_info: only SKUs currently in stock.
CREATE OR REPLACE VIEW available_products AS
  SELECT *
  FROM product_info
  WHERE stock > 0;

View File

@@ -0,0 +1,27 @@
-- A shopping cart belongs to an anonymous client id.
CREATE TABLE IF NOT EXISTS cart (
  cart_id serial PRIMARY KEY,
  client_id text,
  created timestamp DEFAULT CURRENT_TIMESTAMP,
  updated timestamp DEFAULT CURRENT_TIMESTAMP
);

-- One line per product SKU placed in a cart; removed with the cart/product.
CREATE TABLE IF NOT EXISTS cart_lineitem (
  lineitem_id serial PRIMARY KEY,
  cart_id integer REFERENCES cart ON DELETE CASCADE,
  product_no integer REFERENCES product ON DELETE CASCADE,
  product_sku_no integer REFERENCES product_sku ON DELETE CASCADE,
  quantity real
);

-- Cart lines joined with their SKU and product details.
CREATE OR REPLACE VIEW cart_detailed AS
  SELECT cart.client_id, cart.cart_id,
    cart_lineitem.lineitem_id, cart_lineitem.quantity,
    product_sku.sku_id, product_sku.size, product_sku.price,
    product.product_no, product.name, product.description, product.subtext, product.image, product.primary_color
  FROM cart
  INNER JOIN cart_lineitem
    ON cart.cart_id = cart_lineitem.cart_id
  INNER JOIN product_sku
    ON cart_lineitem.product_sku_no = product_sku.sku_id
  INNER JOIN product
    ON product.product_no = cart_lineitem.product_no;

View File

@@ -0,0 +1,11 @@
-- Customer contact and delivery details; customer_no is a UUID string.
CREATE TABLE IF NOT EXISTS customer (
  customer_no varchar(36) PRIMARY KEY DEFAULT gen_random_uuid(),
  email text,
  first_name text,
  last_name text,
  street_address text,
  -- Fix: was `real` — numeric zip codes drop leading zeros (e.g. '0556'
  -- becomes 556), and the seed data supplies zip codes as strings.
  zip_code text,
  city text,
  created timestamp DEFAULT CURRENT_TIMESTAMP,
  updated timestamp DEFAULT CURRENT_TIMESTAMP
); -- fix: the statement was missing its terminating semicolon

View File

@@ -0,0 +1,4 @@
-- Lookup table of supported payment methods (seeded at setup time).
CREATE TABLE IF NOT EXISTS payment_types (
  payment_id serial PRIMARY KEY,
  name text
);

View File

@@ -0,0 +1,18 @@
-- One row per Vipps payment attempt, keyed by our order id.
CREATE TABLE IF NOT EXISTS vipps_payments (
  order_id varchar(127) PRIMARY KEY,
  parent_order_id varchar(127),
  created timestamp DEFAULT CURRENT_TIMESTAMP,
  updated timestamp DEFAULT CURRENT_TIMESTAMP,
  transaction_text text,
  merchant_serial_number text,
  -- Raw request/response payloads kept for debugging and auditing.
  payment_payload json,
  vipps_initiation_response json,
  vipps_transaction_id text,
  vipps_status text DEFAULT 'NOT_STARTED',
  vipps_confirmation_response json,
  end_time timestamp,
  hours float DEFAULT 0,
  -- Payment amounts across the capture/refund lifecycle
  -- (units unspecified here — presumably minor units; confirm with callers).
  amount int DEFAULT 0,
  captured int DEFAULT 0,
  refunded int DEFAULT 0
);

View File

@@ -0,0 +1,40 @@
-- Orders placed by customers; status tracks the order lifecycle.
CREATE TABLE IF NOT EXISTS orders (
  order_id varchar(36) PRIMARY KEY DEFAULT gen_random_uuid(),
  customer_no varchar(36) REFERENCES customer ON DELETE SET NULL,
  status text DEFAULT 'INITIATED',
  created timestamp DEFAULT CURRENT_TIMESTAMP,
  updated timestamp DEFAULT CURRENT_TIMESTAMP
);

-- Order lines snapshot the price at purchase time (product rows may change).
CREATE TABLE IF NOT EXISTS orders_lineitem (
  orders_lineitem_id serial PRIMARY KEY,
  order_id varchar(36) REFERENCES orders,
  product_no integer REFERENCES product ON DELETE SET NULL,
  product_sku_no integer REFERENCES product_sku ON DELETE SET NULL,
  price integer,
  quantity real,
  created timestamp DEFAULT CURRENT_TIMESTAMP,
  updated timestamp DEFAULT CURRENT_TIMESTAMP
);

-- Shipment tracking details, added once an order is dispatched.
CREATE TABLE IF NOT EXISTS shipping (
  shipping_id serial PRIMARY KEY,
  order_id varchar(36) REFERENCES orders,
  shipping_company text,
  tracking_code text,
  tracking_link text,
  user_notified timestamp,
  created timestamp DEFAULT CURRENT_TIMESTAMP,
  updated timestamp DEFAULT CURRENT_TIMESTAMP
);

-- Fix: use LEFT JOINs so an order still appears before a shipping row
-- exists, and after its customer is deleted (customer_no is SET NULL on
-- delete, which made the previous INNER JOINs silently drop the order).
CREATE OR REPLACE VIEW orders_detailed AS
  SELECT
    orders.order_id as order_id, orders.status as order_status, orders.created as order_created, orders.updated as order_updated,
    customer.customer_no, customer.email, customer.first_name, customer.last_name, customer.street_address, customer.zip_code, customer.city,
    shipping.shipping_id, shipping.shipping_company, shipping.tracking_code, shipping.tracking_link, shipping.user_notified, shipping.created as shipping_created, shipping.updated as shipping_updated
  FROM orders
  LEFT JOIN customer
    ON orders.customer_no = customer.customer_no
  LEFT JOIN shipping
    ON orders.order_id = shipping.order_id;

View File

@@ -0,0 +1,225 @@
-- Enable hstore (used below for row_data/changed_fields) and create a
-- dedicated, non-public `audit` schema to hold the audit trail.
CREATE EXTENSION IF NOT EXISTS hstore;
CREATE SCHEMA IF NOT EXISTS audit;
REVOKE ALL ON SCHEMA audit FROM public;
COMMENT ON SCHEMA audit IS 'Out-of-table audit/history logging tables and trigger functions';
--
-- Audited data. Lots of information is available, it's just a matter of how much
-- you really want to record. See:
--
-- http://www.postgresql.org/docs/9.1/static/functions-info.html
--
-- Remember, every column you add takes up more audit table space and slows audit
-- inserts.
--
-- Every index you add has a big impact too, so avoid adding indexes to the
-- audit table unless you REALLY need them. The hstore GIST indexes are
-- particularly expensive.
--
-- It is sometimes worth copying the audit table, or a coarse subset of it that
-- you're interested in, into a temporary table where you CREATE any useful
-- indexes and do your analysis.
--
-- Central audit log: one row per audited row-level change or per audited
-- statement (see statement_only). Populated by audit.if_modified_func().
CREATE TABLE IF NOT EXISTS audit.logged_actions (
  event_id bigserial primary key,
  schema_name text not null,
  table_name text not null,
  relid oid not null,
  session_user_name text,
  action_tstamp_tx TIMESTAMP WITH TIME ZONE NOT NULL,
  action_tstamp_stm TIMESTAMP WITH TIME ZONE NOT NULL,
  action_tstamp_clk TIMESTAMP WITH TIME ZONE NOT NULL,
  transaction_id bigint,
  application_name text,
  client_addr inet,
  client_port integer,
  client_query text,
  -- I = insert, D = delete, U = update, T = truncate
  action TEXT NOT NULL CHECK (action IN ('I','D','U', 'T')),
  row_data hstore,
  changed_fields hstore,
  statement_only boolean not null
);
-- The audit trail is not readable by ordinary roles.
REVOKE ALL ON audit.logged_actions FROM public;
COMMENT ON TABLE audit.logged_actions IS 'History of auditable actions on audited tables, from audit.if_modified_func()';
COMMENT ON COLUMN audit.logged_actions.event_id IS 'Unique identifier for each auditable event';
COMMENT ON COLUMN audit.logged_actions.schema_name IS 'Database schema audited table for this event is in';
COMMENT ON COLUMN audit.logged_actions.table_name IS 'Non-schema-qualified table name of table event occured in';
COMMENT ON COLUMN audit.logged_actions.relid IS 'Table OID. Changes with drop/create. Get with ''tablename''::regclass';
COMMENT ON COLUMN audit.logged_actions.session_user_name IS 'Login / session user whose statement caused the audited event';
COMMENT ON COLUMN audit.logged_actions.action_tstamp_tx IS 'Transaction start timestamp for tx in which audited event occurred';
COMMENT ON COLUMN audit.logged_actions.action_tstamp_stm IS 'Statement start timestamp for tx in which audited event occurred';
COMMENT ON COLUMN audit.logged_actions.action_tstamp_clk IS 'Wall clock time at which audited event''s trigger call occurred';
COMMENT ON COLUMN audit.logged_actions.transaction_id IS 'Identifier of transaction that made the change. May wrap, but unique paired with action_tstamp_tx.';
COMMENT ON COLUMN audit.logged_actions.client_addr IS 'IP address of client that issued query. Null for unix domain socket.';
COMMENT ON COLUMN audit.logged_actions.client_port IS 'Remote peer IP port address of client that issued query. Undefined for unix socket.';
COMMENT ON COLUMN audit.logged_actions.client_query IS 'Top-level query that caused this auditable event. May be more than one statement.';
COMMENT ON COLUMN audit.logged_actions.application_name IS 'Application name set when this audit event occurred. Can be changed in-session by client.';
COMMENT ON COLUMN audit.logged_actions.action IS 'Action type; I = insert, D = delete, U = update, T = truncate';
COMMENT ON COLUMN audit.logged_actions.row_data IS 'Record value. Null for statement-level trigger. For INSERT this is the new tuple. For DELETE and UPDATE it is the old tuple.';
COMMENT ON COLUMN audit.logged_actions.changed_fields IS 'New values of fields changed by UPDATE. Null except for row-level UPDATE events.';
COMMENT ON COLUMN audit.logged_actions.statement_only IS '''t'' if audit event is from an FOR EACH STATEMENT trigger, ''f'' for FOR EACH ROW';
-- Deliberately minimal indexing: every extra index slows audit inserts
-- (see the note above about hstore GIST indexes being especially costly).
CREATE INDEX IF NOT EXISTS logged_actions_relid_idx ON audit.logged_actions(relid);
CREATE INDEX IF NOT EXISTS logged_actions_action_tstamp_tx_stm_idx ON audit.logged_actions(action_tstamp_stm);
CREATE INDEX IF NOT EXISTS logged_actions_action_idx ON audit.logged_actions(action);
-- Trigger function that writes one audit.logged_actions row per audited
-- change. Must be attached AS an AFTER trigger; runs SECURITY DEFINER so
-- it can insert into the protected audit schema. Trigger arguments:
-- TG_ARGV[0] = log client query text (boolean), TG_ARGV[1] = columns to
-- exclude from update diffs (text[]). See the COMMENT below for details.
CREATE OR REPLACE FUNCTION audit.if_modified_func() RETURNS TRIGGER AS $body$
DECLARE
audit_row audit.logged_actions;
include_values boolean;
log_diffs boolean;
h_old hstore;
h_new hstore;
excluded_cols text[] = ARRAY[]::text[];
BEGIN
IF TG_WHEN <> 'AFTER' THEN
RAISE EXCEPTION 'audit.if_modified_func() may only run as an AFTER trigger';
END IF;
audit_row = ROW(
nextval('audit.logged_actions_event_id_seq'), -- event_id
TG_TABLE_SCHEMA::text, -- schema_name
TG_TABLE_NAME::text, -- table_name
TG_RELID, -- relation OID for much quicker searches
session_user::text, -- session_user_name
current_timestamp, -- action_tstamp_tx
statement_timestamp(), -- action_tstamp_stm
clock_timestamp(), -- action_tstamp_clk
txid_current(), -- transaction ID
current_setting('application_name'), -- client application
inet_client_addr(), -- client_addr
inet_client_port(), -- client_port
current_query(), -- top-level query or queries (if multistatement) from client
substring(TG_OP,1,1), -- action
NULL, NULL, -- row_data, changed_fields
'f' -- statement_only
);
IF NOT TG_ARGV[0]::boolean IS DISTINCT FROM 'f'::boolean THEN
audit_row.client_query = NULL;
END IF;
IF TG_ARGV[1] IS NOT NULL THEN
excluded_cols = TG_ARGV[1]::text[];
END IF;
IF (TG_OP = 'UPDATE' AND TG_LEVEL = 'ROW') THEN
audit_row.row_data = hstore(OLD.*);
audit_row.changed_fields = (hstore(NEW.*) - audit_row.row_data) - excluded_cols;
IF audit_row.changed_fields = hstore('') THEN
-- All changed fields are ignored. Skip this update.
RETURN NULL;
END IF;
ELSIF (TG_OP = 'DELETE' AND TG_LEVEL = 'ROW') THEN
audit_row.row_data = hstore(OLD.*) - excluded_cols;
ELSIF (TG_OP = 'INSERT' AND TG_LEVEL = 'ROW') THEN
audit_row.row_data = hstore(NEW.*) - excluded_cols;
ELSIF (TG_LEVEL = 'STATEMENT' AND TG_OP IN ('INSERT','UPDATE','DELETE','TRUNCATE')) THEN
audit_row.statement_only = 't';
ELSE
RAISE EXCEPTION '[audit.if_modified_func] - Trigger func added as trigger for unhandled case: %, %',TG_OP, TG_LEVEL;
RETURN NULL;
END IF;
INSERT INTO audit.logged_actions VALUES (audit_row.*);
RETURN NULL;
END;
$body$
LANGUAGE plpgsql
SECURITY DEFINER
SET search_path = pg_catalog, public;
-- Usage documentation attached to the function itself (visible via \df+).
COMMENT ON FUNCTION audit.if_modified_func() IS $body$
Track changes to a table at the statement and/or row level.
Optional parameters to trigger in CREATE TRIGGER call:
param 0: boolean, whether to log the query text. Default 't'.
param 1: text[], columns to ignore in updates. Default [].
Updates to ignored cols are omitted from changed_fields.
Updates with only ignored cols changed are not inserted
into the audit log.
Almost all the processing work is still done for updates
that ignored. If you need to save the load, you need to use
WHEN clause on the trigger instead.
No warning or error is issued if ignored_cols contains columns
that do not exist in the target table. This lets you specify
a standard set of ignored columns.
There is no parameter to disable logging of values. Add this trigger as
a 'FOR EACH STATEMENT' rather than 'FOR EACH ROW' trigger if you do not
want to log row values.
Note that the user name logged is the login role for the session. The audit trigger
cannot obtain the active role because it is reset by the SECURITY DEFINER invocation
of the audit trigger its self.
$body$;
-- Attach (or re-attach) the audit triggers to `target_table`. Drops any
-- existing audit triggers first, then creates a row-level trigger (when
-- audit_rows) and a statement-level trigger for the remaining operations.
CREATE OR REPLACE FUNCTION audit.audit_table(target_table regclass, audit_rows boolean, audit_query_text boolean, ignored_cols text[]) RETURNS void AS $body$
DECLARE
stm_targets text = 'INSERT OR UPDATE OR DELETE OR TRUNCATE';
_q_txt text;
_ignored_cols_snip text = '';
BEGIN
EXECUTE 'DROP TRIGGER IF EXISTS audit_trigger_row ON ' || target_table;
EXECUTE 'DROP TRIGGER IF EXISTS audit_trigger_stm ON ' || target_table;
IF audit_rows THEN
IF array_length(ignored_cols,1) > 0 THEN
_ignored_cols_snip = ', ' || quote_literal(ignored_cols);
END IF;
_q_txt = 'CREATE TRIGGER audit_trigger_row AFTER INSERT OR UPDATE OR DELETE ON ' ||
target_table ||
' FOR EACH ROW EXECUTE PROCEDURE audit.if_modified_func(' ||
quote_literal(audit_query_text) || _ignored_cols_snip || ');';
RAISE NOTICE '%',_q_txt;
EXECUTE _q_txt;
stm_targets = 'TRUNCATE';
ELSE
END IF;
_q_txt = 'CREATE TRIGGER audit_trigger_stm AFTER ' || stm_targets || ' ON ' ||
target_table ||
' FOR EACH STATEMENT EXECUTE PROCEDURE audit.if_modified_func('||
quote_literal(audit_query_text) || ');';
RAISE NOTICE '%',_q_txt;
EXECUTE _q_txt;
END;
$body$
language 'plpgsql';
COMMENT ON FUNCTION audit.audit_table(regclass, boolean, boolean, text[]) IS $body$
Add auditing support to a table.
Arguments:
target_table: Table name, schema qualified if not on search_path
audit_rows: Record each row change, or only audit at a statement level
audit_query_text: Record the text of the client query that triggered the audit event?
ignored_cols: Columns to exclude from update diffs, ignore updates that change only ignored cols.
$body$;
-- Pg doesn't allow variadic calls with 0 params, so provide a wrapper
CREATE OR REPLACE FUNCTION audit.audit_table(target_table regclass, audit_rows boolean, audit_query_text boolean) RETURNS void AS $body$
SELECT audit.audit_table($1, $2, $3, ARRAY[]::text[]);
$body$ LANGUAGE SQL;
-- And provide a convenience call wrapper for the simplest case
-- of row-level logging with no excluded cols and query logging enabled.
--
CREATE OR REPLACE FUNCTION audit.audit_table(target_table regclass) RETURNS void AS $$
SELECT audit.audit_table($1, BOOLEAN 't', BOOLEAN 't');
$$ LANGUAGE 'sql';
COMMENT ON FUNCTION audit.audit_table(regclass) IS $body$
Add auditing support to the given table. Row-level changes will be logged with full client query text. No cols are ignored.
$body$;

View File

@@ -0,0 +1,19 @@
-- One row per Stripe payment intent, keyed by our order id.
-- (The commented-out columns are planned/unused fields kept for reference.)
CREATE TABLE IF NOT EXISTS stripe_payments (
  order_id varchar(127) PRIMARY KEY,
  created timestamp DEFAULT CURRENT_TIMESTAMP,
  updated timestamp DEFAULT CURRENT_TIMESTAMP,
  -- transaction_text text,
  -- merchant_serial_number text,
  -- payment_payload json,
  stripe_initiation_response json,
  stripe_transaction_id text,
  stripe_status text DEFAULT 'CREATED',
  -- stripe_failed_payment_status text,
  -- stripe_failed_payment
  -- stripe_confirmation_response json,
  -- stripe_payment_method_type text DEFAULT NULL,
  -- Amounts through the payment lifecycle (units unspecified here —
  -- presumably minor units as is usual for Stripe; confirm against callers).
  amount int DEFAULT 0,
  amount_received int DEFAULT 0,
  amount_captured int DEFAULT 0,
  amount_refunded int DEFAULT 0
);

View File

@@ -0,0 +1,120 @@
import path from "path";
import fs from "fs";
import { promises as fsPromises } from "fs";
// Ensure the project-root global used for path resolution is set when this
// script is run directly (outside the main application bootstrap).
if (global.__base == undefined)
  global.__base = path.join(__dirname, "../../../src/");
import db from "../index";
// Shape of one entry in a JSON seed file: the target table ("model"), a
// primary-key hint, and the column → value map that gets inserted.
interface ISeedData {
  model: string;
  // Fix: the seed JSON files use numeric pks (e.g. "pk": 1); widened from
  // `string` so the declared type matches the data. The pk is informational
  // only — inserts rely on the tables' own serial/default keys.
  pk: number | string;
  fields: {
    [key: string]: string | number | boolean;
  };
}
/**
 * One seed file: loads its JSON entries and inserts them into the database,
 * recording the filename in the `seed` table so it is never applied twice.
 */
class SeedStep {
  filepath: string;
  filename: string;
  data: Array<ISeedData>;

  constructor(filepath: string) {
    this.filepath = filepath;
    // Fix: path.basename handles both / and \ separators;
    // filepath.split("/") broke on Windows paths.
    this.filename = path.basename(filepath);
    this.data = [];
  }

  /** Load the JSON entries, reversed so pop() yields them in file order. */
  readData() {
    this.data = JSON.parse(fs.readFileSync(this.filepath, "utf-8"));
    this.data = this.data.reverse();
  }

  /** Resolves true when this filename is already recorded in `seed`. */
  get isApplied() {
    const query = `SELECT * FROM seed WHERE filename = $1`;
    return db
      .query(query, [this.filename])
      // Fix: any matching row counts (was `== 1`, which would re-run the
      // seed if duplicate bookkeeping rows ever existed).
      .then((resp) => resp > 0);
  }

  /** Record this seed file as applied. */
  commitStepToDb() {
    const query = `INSERT INTO seed (filename) VALUES ($1)`;
    return db.query(query, [this.filename]);
  }

  /** Insert the remaining entries one at a time (consumes this.data). */
  async executeSeedDatas() {
    const seedData = this.data.pop();
    if (!seedData) return; // fix: guard against an empty seed file
    const { model, fields } = seedData;
    const columns = Object.keys(fields);
    const values = Object.values(fields);
    const parameterKeys = columns.map((_, k) => `$${k + 1}`);
    // NOTE(review): `model` and the column names are interpolated into the
    // SQL — safe only because seed files are trusted, repo-local JSON.
    const query = `INSERT INTO ${model}
(${columns.join(",")})
VALUES
(${parameterKeys.join(",")})`;
    await db.query(query, values);
    if (this.data.length > 0) {
      await this.executeSeedDatas();
    }
  }

  /** Apply this seed file unless it has already been recorded as applied. */
  async applySeedData() {
    if (await this.isApplied) {
      console.log(`⚠️ Step: ${this.filename}, already applied.`);
      return;
    }
    console.log(`Seeding ${this.filename}:`);
    const tables = Array.from(new Set(this.data.map((el) => el.model)));
    const steps = this.data.length;
    await this.executeSeedDatas();
    await this.commitStepToDb();
    console.log(
      `🌱 ${steps} object(s) applied to table(s): ${tables.join(", ")}.`
    );
  }
}
/**
* UTILS
*/
const readSeedFiles = () => {
const seedFolder = path.join(__base, "database/seeds/");
console.log(`Reading seeds from folder: ${seedFolder}\n`);
return fsPromises
.readdir(seedFolder)
.then((files) =>
files.reverse().map((filePath) => {
const seedStep = new SeedStep(path.join(seedFolder, filePath));
seedStep.readData();
return seedStep;
})
)
.catch(console.log);
};
/**
 * Apply every seed step sequentially, consuming the array from the end
 * (pop order). Fix: a plain loop instead of recursion, with a guard so an
 * empty or missing list resolves cleanly instead of crashing on pop().
 * @param seedSteps steps to apply; each must expose applySeedData()
 */
async function runAllSteps(
  seedSteps: Array<{ applySeedData: () => Promise<void> }>
): Promise<void> {
  while (seedSteps && seedSteps.length > 0) {
    const seedStep = seedSteps.pop();
    if (seedStep) await seedStep.applySeedData();
  }
}
/**
 * Runner: load all seed files, apply them in order, then exit.
 */
readSeedFiles()
  // runAllSteps already returns a promise; no wrapper lambda needed.
  .then(runAllSteps)
  .catch((error) => console.error(error))
  .finally(() => process.exit(0));

View File

@@ -0,0 +1,70 @@
import path from "path";
import fs from "fs";
import { promises as fsPromises } from "fs";
// Ensure the project-root global used for path resolution is set when this
// script is run directly (outside the main application bootstrap).
if (global.__base == undefined)
  global.__base = path.join(__dirname, "../../../src/");
import db from "../index";
// Schema file names in intended dependency order (referenced tables first).
// NOTE(review): this list is currently unused — readSchemaFiles() below reads
// the whole schemas folder and relies on directory listing order instead;
// confirm whether this explicit ordering was meant to drive applyAll().
const paymentTypes = `paymentTypes.sql`;
const products = `products.sql`;
const orders = `orders.sql`;
const vipps_payment = `vipps_payments.sql`;
const seed = `seed.sql`;
const schemas = [paymentTypes, products, orders, vipps_payment, seed];
/** Terminate the process, logging the outcome of the setup run. */
const handleExit = (error = undefined) => {
  if (error == undefined) {
    console.log("✅ Exited setup successfully!");
    process.exit(0);
  }
  console.log(`🚫 Exited with error: ${error}`);
  process.exit(1);
};
/**
 * Read every schema file in the schemas folder and resolve with their
 * contents, in directory-listing order (Promise.all preserves it).
 * Fix: file contents are now read with the non-blocking promise API instead
 * of readFileSync inside an async pipeline.
 * NOTE(review): assumes the listing order satisfies FK dependencies between
 * schema files — confirm (see the unused ordered list above).
 * @returns Promise resolving to the SQL text of each schema file
 */
const readSchemaFiles = (): Promise<string[]> => {
  const schemaFolder = path.join(__base, "database/schemas");
  console.log("Reading schemas from folder:", schemaFolder);
  return fsPromises.readdir(schemaFolder).then((files) =>
    Promise.all(
      files.map((filename) =>
        fsPromises.readFile(path.join(schemaFolder, filename), "utf-8")
      )
    )
  );
};
/**
 * Apply schemas one at a time, consuming the array from the end (pop order).
 * Logs the table name when the statement is a CREATE TABLE.
 * Fixes: a plain loop replaces the recursion; an empty array no longer
 * crashes on pop(); `groups` is optional-chained before indexing.
 * @param schemas SQL statements to execute, already reversed by applyAll
 */
async function processQuery(schemas: string[]): Promise<void> {
  const re = /(^CREATE TABLE IF NOT EXISTS )(?<tb_name>\w*)/;
  while (schemas.length > 0) {
    const schema = schemas.pop();
    if (!schema) continue;
    const tableName = schema.match(re)?.groups?.tb_name;
    if (tableName) console.log("✏️ Applying schema:", tableName);
    else console.log("🧙‍♂️ applying something else");
    await db.query(schema, null);
  }
}
// Reverse once so processQuery's pop() applies schemas in original
// read order, then hand off the whole batch.
const applyAll = (schemas: string[]) => processQuery(schemas.reverse());
/**
 * Runner: read all schema files, apply them, then report and exit.
 * Fix: the old chain ran `.then(_ => process.exit(0))` AFTER `.catch`, so a
 * successful run exited silently and a handled error could still fall
 * through; both outcomes now go through handleExit.
 */
readSchemaFiles()
  .then((schemas) => applyAll(schemas))
  .then(() => handleExit())
  .catch((err) => handleExit(err));
// db.connect()
//   .then(client => setup(client, schemas))

View File

@@ -0,0 +1,51 @@
import fs from "fs";
import path from "path";
// Ensure the project-root global used for path resolution is set when this
// script is run directly (outside the main application bootstrap).
if (global.__base == undefined)
  global.__base = path.join(__dirname, "../../../src/");
import db from "../index";
// Collect the names of every user table (everything outside the pg_catalog
// and information_schema system schemas). The list is reversed so the
// most recently listed tables are dropped first by teardown().
const allTableNames = () => {
  const sql = `
SELECT tablename
FROM pg_catalog.pg_tables
WHERE schemaname != 'pg_catalog' AND
schemaname != 'information_schema'
`;
  // db.all resolves with all rows; each row carries a `tablename` column.
  return db
    .all(sql, [])
    .then((rows) => rows.map((row) => row.tablename).reverse());
};
/**
 * Drop the given tables; CASCADE removes dependent views and constraints.
 * Resolves immediately when there is nothing to drop.
 * Fix: table names are now double-quoted — pg_tables reports the literal
 * name, and unquoted identifiers would be case-folded by Postgres (and a
 * hostile name could otherwise inject SQL).
 * @param tableNames table names as reported by pg_catalog.pg_tables
 */
const teardown = (tableNames: string[]) => {
  if (tableNames.length) {
    console.log(`Tearing down tables:`);
    console.log(` - ${tableNames.join("\n - ")}`);
    const quoted = tableNames.map((name) => `"${name}"`);
    const sql = `DROP TABLE IF EXISTS ${quoted.join(",")} CASCADE`;
    return db.query(sql, null);
  } else {
    console.log("No tables left to drop.");
    return Promise.resolve();
  }
};
/** Terminate the process, logging the outcome of the teardown run. */
const handleExit = (error = undefined) => {
  if (error == undefined) {
    console.log("✅ Exited teardown successfully!");
    process.exit(0);
  }
  console.log(`🚫 Exited with error: ${error}`);
  process.exit(1);
};
// Runner: connect, list every user table, drop them all, then exit via
// handleExit (which reports success or failure).
db.connect()
  .then(() => allTableNames())
  .then((tableNames) => teardown(tableNames))
  .catch(console.log)
  .finally(handleExit);
// Fix: was `module.exports = db`, which mixes CommonJS into a file that
// uses ES module `import` syntax; re-export with ESM syntax instead.
export default db;

View File

@@ -0,0 +1,23 @@
[
{
"model": "payment_types",
"pk": 1,
"fields": {
"name": "eComm Regular Payment"
}
},
{
"model": "payment_types",
"pk": 2,
"fields": {
"name": "eComm Express Payment"
}
},
{
"model": "payment_types",
"pk": 3,
"fields": {
"name": "Stripe Card Payment"
}
}
]

View File

@@ -0,0 +1,198 @@
[
{
"model": "product",
"pk": 1,
"fields": {
"name": "Floral",
"subtext": "By W.H Auden",
"description": "Package include 10 pieces, Choose between A-F Key Value",
"image": "https://storage.googleapis.com/planetposen-images/floral.jpg",
"primary_color": "#E6E0DC"
}
},
{
"model": "product_sku",
"pk": 1,
"fields": {
"product_no": 1,
"price": 20.0,
"stock": 10,
"default_price": true,
"size": "Set"
}
},
{
"model": "product",
"pk": 2,
"fields": {
"name": "Forrest",
"subtext": "By W.H Auden",
"description": "Package include 10 pieces, Choose between A-F Key Value",
"image": "https://storage.googleapis.com/planetposen-images/forrest.jpg",
"primary_color": "#C9B2A9"
}
},
{
"model": "product_sku",
"pk": 2,
"fields": {
"product_no": 2,
"price": 30,
"stock": 10,
"size": "Set"
}
},
{
"model": "product_sku",
"pk": 3,
"fields": {
"product_no": 2,
"price": 50.0,
"stock": 10,
"default_price": true,
"size": "Large"
}
},
{
"model": "product_sku",
"pk": 4,
"fields": {
"product_no": 2,
"price": 42.0,
"stock": 10,
"size": "Medium"
}
},
{
"model": "product",
"pk": 3,
"fields": {
"name": "Mush",
"subtext": "By W.H Auden",
"description": "Package include 10 pieces, Choose between A-F Key Value",
"image": "https://storage.googleapis.com/planetposen-images/mush.jpg",
"primary_color": "#231B1D"
}
},
{
"model": "product_sku",
"pk": 5,
"fields": {
"product_no": 3,
"price": 30.0,
"stock": 10,
"default_price": true,
"size": "Set"
}
},
{
"model": "product",
"pk": 4,
"fields": {
"name": "The Buried Life",
"subtext": "By W.H Auden",
"description": "Package include 10 pieces, Choose between A-F Key Value",
"image": "https://storage.googleapis.com/planetposen-images/the-buried-life.jpg",
"primary_color": "#C9B2A9"
}
},
{
"model": "product_sku",
"pk": 6,
"fields": {
"product_no": 4,
"price": 20.0,
"stock": 10,
"default_price": true,
"size": "Set"
}
},
{
"model": "product",
"pk": 5,
"fields": {
"name": "Cookie-Man Forrest",
"subtext": "By W.H Auden",
"description": "Package include 10 pieces, Choose between A-F Key Value",
"image": "https://storage.googleapis.com/planetposen-images/cookie-man-forrest.jpg",
"primary_color": "#E6E0DC"
}
},
{
"model": "product_sku",
"pk": 7,
"fields": {
"product_no": 5,
"price": 45.0,
"stock": 10,
"default_price": true,
"size": "Set"
}
},
{
"model": "product",
"pk": 6,
"fields": {
"name": "Yorkshire Shoreline",
"subtext": "By W.H Auden",
"description": "Package include 10 pieces, Choose between A-F Key Value",
"image": "https://storage.googleapis.com/planetposen-images/yorkshire-shoreline.jpg",
"primary_color": "#E6E0DC"
}
},
{
"model": "product_sku",
"pk": 8,
"fields": {
"product_no": 6,
"price": 98.0,
"stock": 34,
"default_price": true,
"size": "Set"
}
},
{
"model": "product",
"pk": 7,
"fields": {
"name": "Kneeling in Yemen",
"subtext": "By W.H Auden",
"description": "Package include 10 pieces, Choose between A-F Key Value",
"image": "https://storage.googleapis.com/planetposen-images/kneeling-in-yemen.jpg",
"primary_color": "#E6E0DC"
}
},
{
"model": "product_sku",
"pk": 9,
"fields": {
"product_no": 7,
"price": 78.0,
"stock": 10,
"default_price": true,
"size": "Set"
}
},
{
"model": "product",
"pk": 8,
"fields": {
"name": "Spectural Forrest",
"subtext": "By W.H Auden",
"description": "Package include 10 pieces, Choose between A-F Key Value",
"image": "https://storage.googleapis.com/planetposen-images/spectural-forrest.jpg",
"primary_color": "#E6E0DC"
}
},
{
"model": "product_sku",
"pk": 10,
"fields": {
"product_no": 8,
"price": 60.0,
"stock": 10,
"default_price": true,
"size": "Set"
}
}
]

View File

@@ -0,0 +1,67 @@
[
{
"model": "customer",
"pk": 1,
"fields": {
"customer_no": "7CB9A6B8-A526-4836-BF4E-67E1075F8B83",
"email": "kevin.midboe@gmail.com",
"first_name": "kevin",
"last_name": "midbøe",
"street_address": "Schleppegrells gate 18",
"zip_code": "0556",
"city": "Oslo"
}
},
{
"model": "customer",
"pk": 2,
"fields": {
"customer_no": "FFF49A98-0A2F-437D-9069-9664ADB2FFFE",
"email": "Maia.Neteland@gmail.com",
"first_name": "Maia",
"last_name": "Neteland",
"street_address": "Mosekollen 14",
"zip_code": "0752",
"city": "Oslo"
}
},
{
"model": "customer",
"pk": 3,
"fields": {
"customer_no": "DFC94AB1-9BB6-4ECF-8747-3E12751892AB",
"email": "Aksel.Engeset@gmail.com",
"first_name": "Aksel",
"last_name": "Engeset",
"street_address": "Christian Bloms gate 210",
"zip_code": "3041",
"city": "DRAMMEN"
}
},
{
"model": "customer",
"pk": 4,
"fields": {
"customer_no": "E235456D-C884-4828-BB0F-5065056BD57A",
"email": "Thomas.Langemyr@gmail.com",
"first_name": "Thomas",
"last_name": "Langemyr",
"street_address": "Clausenbakken 208",
"zip_code": "1369",
"city": "Stabekk"
}
},
{
"model": "customer",
"pk": 5,
"fields": {
"customer_no": "3C1C1952-87E3-46A8-8B22-383B2F566E26",
"email": "Frida.Nilsen@gmail.com",
"first_name": "Frida",
"last_name": "Nilsen",
"street_address": "Jansbråteveien 195",
"zip_code": "1730",
"city": "Ise"
}
}
]

View File

@@ -0,0 +1,39 @@
[
{
"model": "cart",
"pk": 1,
"fields": {
"client_id": "800020696e96800f8904ea"
}
},
{
"model": "cart_lineitem",
"pk": 1,
"fields": {
"cart_id": 1,
"product_no": 2,
"product_sku_no": 1,
"quantity": 15
}
},
{
"model": "cart_lineitem",
"pk": 2,
"fields": {
"cart_id": 1,
"product_no": 2,
"product_sku_no": 3,
"quantity": 4
}
},
{
"model": "cart_lineitem",
"pk": 3,
"fields": {
"cart_id": 1,
"product_no": 1,
"product_sku_no": 1,
"quantity": 3
}
}
]

View File

@@ -0,0 +1,63 @@
[
{
"model": "orders",
"pk": 1,
"fields": {
"order_id": "fb9a5910-0dcf-4c65-9c25-3fb3eb883ce5",
"customer_no": "7CB9A6B8-A526-4836-BF4E-67E1075F8B83"
}
},
{
"model": "orders_lineitem",
"pk": 1,
"fields": {
"order_id": "fb9a5910-0dcf-4c65-9c25-3fb3eb883ce5",
"product_no": 2,
"product_sku_no": 2,
"price": 30,
"quantity": 3
}
},
{
"model": "orders_lineitem",
"pk": 2,
"fields": {
"order_id": "fb9a5910-0dcf-4c65-9c25-3fb3eb883ce5",
"product_no": 2,
"product_sku_no": 3,
"price": 50,
"quantity": 2
}
},
{
"model": "orders_lineitem",
"pk": 3,
"fields": {
"order_id": "fb9a5910-0dcf-4c65-9c25-3fb3eb883ce5",
"product_no": 6,
"product_sku_no": 8,
"price": 98,
"quantity": 18
}
},
{
"model": "orders",
"pk": 2,
"fields": {
"order_id": "2E9EB68E-4224-46C8-9AA2-3A13A55005BA",
"customer_no": "3C1C1952-87E3-46A8-8B22-383B2F566E26"
}
},
{
"model": "orders_lineitem",
"pk": 4,
"fields": {
"order_id": "2E9EB68E-4224-46C8-9AA2-3A13A55005BA",
"product_no": 2,
"product_sku_no": 2,
"price": 30,
"quantity": 1
}
}
]

View File

@@ -0,0 +1,13 @@
[
{
"model": "shipping",
"pk": 1,
"fields": {
"shipping_id": 1,
"order_id": "fb9a5910-0dcf-4c65-9c25-3fb3eb883ce5",
"shipping_company": "Posten BRING",
"tracking_code": "CS111111111NO",
"tracking_link": "https://sporing.posten.no/sporing/CS111111111NO"
}
}
]