refactor(general): removed uuid dependency, moved migrations into one file for the core module
This commit is contained in:
Parent: dad5ca924a
Commit: 44c380c56e
@@ -5,13 +5,38 @@ exports.up = async knex => {
   await knex.raw( 'CREATE EXTENSION IF NOT EXISTS "pgcrypto"' )
   await knex.raw( 'CREATE EXTENSION IF NOT EXISTS "ltree"' )
 
+  await knex.schema.createTable( 'users', table => {
+    table.string( 'id', 10 ).primary( )
+    table.string( 'username', 20 ).unique( ).notNullable( )
+    table.timestamp( 'created_at' ).defaultTo( knex.fn.now( ) )
+    table.string( 'name' ).notNullable( )
+    table.string( 'email' ).unique( )
+    table.jsonb( 'profiles' )
+    table.text( 'password_digest' ) // bcrypted pwd
+    table.bool( 'verified' ).defaultTo( false )
+  } )
+
+  // Api tokens. TODO: add moar comments
+  await knex.schema.createTable( 'api_token', table => {
+    table.string( 'id', 10 ).primary( )
+    table.string( 'token_digest' ).unique( )
+    table.string( 'owner_id', 10 ).references( 'id' ).inTable( 'users' ).notNullable( )
+    table.string( 'name' )
+    table.string( 'last_chars', 6 )
+    table.specificType( 'scopes', 'text[]' )
+    table.boolean( 'revoked' ).defaultTo( false )
+    table.bigint( 'lifespan' ).defaultTo( 3.154e+12 ) // defaults to a lifespan of 100 years
+    table.timestamp( 'created_at' ).defaultTo( knex.fn.now( ) )
+    table.timestamp( 'last_used' ).defaultTo( knex.fn.now( ) )
+  } )
+
   // Streams Table
   await knex.schema.createTable( 'streams', table => {
-    table.text( 'id' ).unique( ).primary( )
-    table.text( 'name' )
+    table.string( 'id', 10 ).primary( )
+    table.string( 'name' )
     table.text( 'description' )
     table.boolean( 'isPublic' ).defaultTo( true )
-    table.text( 'cloned_from' ).references( 'id' ).inTable( 'streams' )
+    table.string( 'cloned_from', 10 ).references( 'id' ).inTable( 'streams' )
     table.timestamp( 'created_at' ).defaultTo( knex.fn.now( ) )
     table.timestamp( 'updated_at' ).defaultTo( knex.fn.now( ) )
     // table.unique( [ 'owner_id', 'name' ] )
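
(Sanity check on the lifespan default above: one year is roughly 3.154e7 seconds, so 100 years ≈ 3.154e9 seconds = 3.154e12 milliseconds, which matches the 3.154e+12 default.)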
@@ -27,9 +52,10 @@ exports.up = async knex => {
   END$$;
   ` )
 
   // Stream-users access control list.
   await knex.schema.createTable( 'stream_acl', table => {
-    table.text( 'user_id' ).references( 'id' ).inTable( 'users' ).notNullable( ).onDelete( 'cascade' )
-    table.text( 'resource_id' ).references( 'id' ).inTable( 'streams' ).notNullable( ).onDelete( 'cascade' )
+    table.string( 'user_id', 10 ).references( 'id' ).inTable( 'users' ).notNullable( ).onDelete( 'cascade' )
+    table.string( 'resource_id', 10 ).references( 'id' ).inTable( 'streams' ).notNullable( ).onDelete( 'cascade' )
     table.primary( [ 'user_id', 'resource_id' ] )
+    table.unique( [ 'user_id', 'resource_id' ] )
     table.specificType( 'role', 'speckle_acl_role_type' ).defaultTo( 'write' )
@@ -37,29 +63,25 @@ exports.up = async knex => {
   // Objects Table
   await knex.schema.createTable( 'objects', table => {
-    table.text( 'hash' ).primary( )
-    table.text( 'speckle_type' ).defaultTo( 'Base' ).notNullable( )
-    table.text( 'applicationId' )
+    table.string( 'hash' ).primary( )
+    table.string( 'speckle_type' ).defaultTo( 'Base' ).notNullable( )
+    table.string( 'applicationId' )
     table.jsonb( 'data' )
-    table.text( 'author' ).references( 'id' ).inTable( 'users' )
+    table.string( 'author', 10 ).references( 'id' ).inTable( 'users' )
     table.timestamp( 'created_at' ).defaultTo( knex.fn.now( ) )
     table.index( [ 'speckle_type' ], 'type_index' )
   } )
 
   // Tree inheritance tracker
   await knex.schema.createTable( 'object_tree_refs', table => {
     table.increments( 'id' )
-    table.text( 'parent' )
+    table.string( 'parent' )
     table.specificType( 'path', 'ltree' )
   } )
 
   await knex.raw( `CREATE INDEX tree_path_idx ON object_tree_refs USING gist(path)` )
 
-  // Sets a trigger to generate the hash of objects if not present.
-  // File
-  let hashTriggerObjects = require( './helperFunctions' ).hashTriggerGenerator( 'objects', 'hash' )
-  await knex.raw( hashTriggerObjects )
 
-  // creates an enum type for db references.
+  // creates an enum type for db reference types (branch, tag).
   await knex.raw( `
   DO $$
   BEGIN
@@ -71,14 +93,14 @@ exports.up = async knex => {
 
   // Reference table. A reference can be a branch or a tag.
   await knex.schema.createTable( 'references', table => {
-    table.uuid( 'id' ).defaultTo( knex.raw( 'gen_random_uuid()' ) ).unique( ).primary( )
-    table.text( 'stream_id' ).references( 'id' ).inTable( 'streams' ).notNullable( ).onDelete( 'cascade' )
-    table.text( 'author' ).references( 'id' ).inTable( 'users' )
-    table.text( 'name' )
+    table.string( 'id', 10 ).primary( )
+    table.string( 'stream_id', 10 ).references( 'id' ).inTable( 'streams' ).notNullable( ).onDelete( 'cascade' )
+    table.string( 'author', 10 ).references( 'id' ).inTable( 'users' )
+    table.string( 'name' )
     table.specificType( 'type', 'speckle_reference_type' ).defaultTo( 'branch' )
     table.text( 'description' )
     // (Sparse) Only populated for tags, which hold one commit.
-    table.text( 'commit_id' ).references( 'hash' ).inTable( 'objects' )
+    table.string( 'commit_id' ).references( 'hash' ).inTable( 'objects' )
     table.timestamp( 'created_at' ).defaultTo( knex.fn.now( ) )
     table.timestamp( 'updatedAt' ).defaultTo( knex.fn.now( ) )
     table.unique( [ 'stream_id', 'name' ] )
@@ -87,16 +109,16 @@ exports.up = async knex => {
 
-  // Junction Table Branches >- -< Commits
+  // Note: Branches >- -< Commits is a many-to-many relationship (one commit can belong to multiple branches, one branch can have multiple commits)
   await knex.schema.createTable( 'branch_commits', table => {
-    table.uuid( 'branch_id' ).references( 'id' ).inTable( 'references' ).notNullable( ).onDelete('cascade')
-    table.text( 'commit_id' ).references( 'hash' ).inTable( 'objects' ).notNullable( )
+    table.string( 'branch_id', 10 ).references( 'id' ).inTable( 'references' ).notNullable( ).onDelete( 'cascade' )
+    table.string( 'commit_id' ).references( 'hash' ).inTable( 'objects' ).notNullable( )
     table.primary( [ 'branch_id', 'commit_id' ] )
   } )
 
   // Flat table to store all commits to this stream, regardless of branch.
   // Optional, might be removed as you can get all the commits from each branch...
   await knex.schema.createTable( 'stream_commits', table => {
-    table.text( 'stream_id' ).references( 'id' ).inTable( 'streams' ).notNullable( ).onDelete('cascade')
-    table.text( 'commit_id' ).references( 'hash' ).inTable( 'objects' ).notNullable( )
+    table.string( 'stream_id', 10 ).references( 'id' ).inTable( 'streams' ).notNullable( ).onDelete( 'cascade' )
+    table.string( 'commit_id' ).references( 'hash' ).inTable( 'objects' ).notNullable( )
     table.primary( [ 'stream_id', 'commit_id' ] )
   } )
 
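
(The note above spells out why branch_commits is a junction table; for illustration, a hypothetical lookup of all commit ids on one branch would go through it — someBranchId is a placeholder:

  let commits = await knex( 'branch_commits' )
    .where( { branch_id: someBranchId } )
    .select( 'commit_id' )

The same table queried by commit_id answers the reverse question, which branches contain a commit.)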
@@ -111,6 +133,8 @@ exports.down = async knex => {
   await knex.schema.dropTableIfExists( 'object_tree_refs' )
   await knex.schema.dropTableIfExists( 'objects' )
   await knex.schema.dropTableIfExists( 'streams' )
+  await knex.schema.dropTableIfExists( 'api_token' )
+  await knex.schema.dropTableIfExists( 'users' )
   await knex.raw( `DROP TYPE IF EXISTS speckle_reference_type ` )
   await knex.raw( `DROP TYPE IF EXISTS speckle_acl_role_type ` )
 }
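
(For context, a consolidated migration file like the one above is normally applied through Knex's migration API; a minimal sketch, assuming a local Postgres database and a ./migrations directory — both placeholders:

  const knex = require( 'knex' )( {
    client: 'pg',
    connection: 'postgres://localhost/speckle_test', // hypothetical connection string
    migrations: { directory: './migrations' } // wherever the file above lives
  } )

  // migrate.latest() runs the pending exports.up; migrate.rollback() would run exports.down
  await knex.migrate.latest( )
)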
@@ -1,36 +0,0 @@
-'use strict'
-
-// Knex table migrations
-exports.up = async knex => {
-  await knex.raw( 'CREATE EXTENSION IF NOT EXISTS "pgcrypto"' )
-
-  await knex.schema.createTable( 'users', table => {
-    table.text( 'id' ).unique( ).primary( )
-    table.text( 'username' ).unique( ).notNullable( )
-    table.timestamp( 'created_at' ).defaultTo( knex.fn.now( ) )
-    table.text( 'name' ).notNullable( )
-    table.text( 'email' ).unique( )
-    table.jsonb( 'profiles' )
-    table.text( 'password_digest' )
-    table.bool( 'verified' ).defaultTo( false )
-  } )
-
-  await knex.schema.createTable( 'api_token', table => {
-    table.text( 'id' ).unique( ).primary( )
-    table.text( 'token_digest' ).unique( )
-    table.text( 'owner_id' ).references( 'id' ).inTable( 'users' ).notNullable( )
-    table.text( 'name' )
-    table.text( 'last_chars' )
-    table.specificType( 'scopes', 'text[]' )
-    table.boolean( 'revoked' ).defaultTo( false )
-    table.bigint( 'lifespan' ).defaultTo( 3.154e+12 )
-    table.timestamp( 'created_at' ).defaultTo( knex.fn.now( ) )
-    table.timestamp( 'last_used' ).defaultTo( knex.fn.now( ) )
-  } )
-
-}
-
-exports.down = async knex => {
-  await knex.schema.dropTableIfExists( 'api_token' )
-  await knex.schema.dropTableIfExists( 'users' )
-}
@@ -1,79 +0,0 @@
-// To make the migration parser happy, we add the customary up and down of knex.
-// They're ignored. A better way would be to conventionally exclude these types of files.
-up = async knex => {}
-down = async knex => {}
-
-let hashTriggerGenerator = ( tableName, hostField, hashByField ) => `
-CREATE OR REPLACE FUNCTION ${tableName}_hash_${hostField}_update_tg() RETURNS trigger AS $$
-BEGIN
-  IF ( tg_op = 'INSERT' OR tg_op = 'UPDATE') AND (( NEW.${hostField} = '') IS NOT FALSE) THEN
-    NEW.${hostField} = md5( ${ hashByField ? "NEW." + hashByField : "NEW" }::text );
-    RETURN NEW;
-  END IF;
-  RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE TRIGGER ${tableName}_hash_${hostField}_update
-BEFORE INSERT OR UPDATE ON ${tableName}
-FOR EACH ROW EXECUTE PROCEDURE ${tableName}_hash_${hostField}_update_tg();
-`
-
-// ref: https://blog.andyet.com/2016/02/23/generating-shortids-in-postgres/
-let shortIdTriggerGenerator = ( tableName, hostField ) => `
-CREATE OR REPLACE FUNCTION ${tableName}_shortid_update() RETURNS trigger AS $$
-
-DECLARE
-  key TEXT;
-  qry TEXT;
-  found TEXT;
-
-BEGIN
-  qry := 'SELECT ${hostField} FROM ' || quote_ident(TG_TABLE_NAME) || ' WHERE ${hostField}=';
-
-  LOOP
-
-    -- Generate our string bytes and re-encode as a base64 string.
-    key := encode(gen_random_bytes(6), 'base64');
-
-    -- Base64 encoding contains 2 URL unsafe characters by default.
-    -- The URL-safe version has these replacements.
-    key := replace(key, '/', '_'); -- url safe replacement
-    key := replace(key, '+', '-'); -- url safe replacement
-
-    -- Concat the generated key (safely quoted) with the generated query
-    -- and run it.
-    -- SELECT id FROM "test" WHERE id='blahblah' INTO found
-    -- Now "found" will be the duplicated id or NULL.
-    EXECUTE qry || quote_literal(key) INTO found;
-
-    -- Check to see if found is NULL.
-    -- If we checked to see if found = NULL it would always be FALSE
-    -- because (NULL = NULL) is always FALSE.
-    IF found IS NULL THEN
-      -- If we didn't find a collision then leave the LOOP.
-      EXIT;
-    END IF;
-
-    -- We haven't EXITed yet, so return to the top of the LOOP
-    -- and try again.
-  END LOOP;
-
-  -- NEW and OLD are available in TRIGGER PROCEDURES.
-  -- NEW is the mutated row that will actually be INSERTed.
-  -- We're replacing id, regardless of what it was before
-  -- with our key variable.
-  NEW.${hostField} = key;
-
-  -- The RECORD returned here is what will actually be INSERTed,
-  -- or what the next trigger will get if there is one.
-  RETURN NEW;
-END;
-$$ language 'plpgsql';
-
-CREATE TRIGGER ${tableName}_shortid_update
-BEFORE INSERT ON ${tableName}
-FOR EACH ROW EXECUTE PROCEDURE ${tableName}_shortid_update();
-`
-
-module.exports = { hashTriggerGenerator, shortIdTriggerGenerator, up, down }
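
(The deleted shortid trigger above minted ids inside Postgres; this commit moves id generation into the application layer using the crypto-random-string package, imported as crs in the service code below. A minimal sketch of the swap, with a hypothetical row:

  const crs = require( 'crypto-random-string' )

  let branch = { name: 'main' } // hypothetical row
  branch.id = crs( { length: 10 } ) // 10-character random id, e.g. '2cf05d94db'
  // the insert then proceeds with the pre-assigned id instead of relying on a trigger
)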
@@ -15,6 +15,7 @@ module.exports = {
   */
   createTag: async ( tag, streamId, userId ) => {
     delete tag.commits // let's make sure
+    tag.id = crs( { length: 10 } )
     tag.stream_id = streamId
     tag.author = userId
     tag.type = 'tag'
@@ -48,6 +49,8 @@ module.exports = {
     let commits = branch.commits || [ ]
     delete branch.commits
+    delete branch.commit_id
+    branch.id = crs( { length: 10 } )
 
     branch.stream_id = streamId
     branch.author = userId
     branch.type = 'branch'
@@ -57,7 +60,7 @@ module.exports = {
       let branchCommits = commits.map( commitId => { return { branch_id: id, commit_id: commitId } } )
       await knex.raw( BranchCommits( ).insert( branchCommits ) + ' on conflict do nothing' )
     }
-    return id
+    return branch.id
   },
 
   updateBranch: async ( branch ) => {
@@ -78,7 +81,7 @@ module.exports = {
   },
 
   getBranchById: async ( branchId ) => {
-    let [ branch ] = await Refs( ).where( { id: branchId, type: 'branch' } ).select( '*' )
+    let branch = await Refs( ).where( { id: branchId, type: 'branch' } ).first( ).select( '*' )
     let commits = await BranchCommits( ).where( { branch_id: branchId } )
     branch.commits = commits.map( c => c.commit_id )
 
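
(The getBranchById change above trades array destructuring for Knex's .first( ), which resolves to a single row object — or undefined — instead of a one-element array; a small before/after sketch of the two alternatives:

  // before: select() resolves to an array, so the row had to be destructured
  let [ branch ] = await Refs( ).where( { id: branchId } ).select( '*' )

  // after: first() resolves directly to the row, or undefined when nothing matches
  let branch = await Refs( ).where( { id: branchId } ).first( )
)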
@@ -79,7 +79,8 @@ describe( 'Tags & Branches', ( ) => {
   it( 'Should not allow dupe branches', async ( ) => {
     try {
       await createBranch( branch, stream.id, user.id )
       let dupeBranch = { ...branch }
       await createBranch( dupeBranch, stream.id, user.id )
+      assert.fail( 'Duplicate branches should not be allowed.' )
     } catch ( err ) {
       // Pass
@@ -125,7 +126,8 @@ describe( 'Tags & Branches', ( ) => {
   it( 'Should not allow for duplicate tags', async ( ) => {
     try {
       await createTag( tag, stream.id, user.id )
       let dupeTag = { ...tag }
       await createTag( dupeTag, stream.id, user.id )
+      assert.fail( )
     } catch {
       // Pass