You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
boundary/internal/db/migrations/postgres.gen.go

5361 lines
180 KiB

// Code generated by "make migrations"; DO NOT EDIT.
package migrations
// postgresMigrations maps embedded migration file paths ("migrations/<name>")
// to their contents. The bare "migrations" key models the directory itself
// (it has a name but no bytes).
var postgresMigrations = map[string]*fakeFile{
"migrations": {
name: "migrations",
},
"migrations/01_domain_types.down.sql": {
name: "01_domain_types.down.sql",
bytes: []byte(`
begin;
drop domain wt_timestamp;
drop domain wt_public_id;
drop domain wt_private_id;
drop domain wt_scope_id;
drop domain wt_user_id;
drop domain wt_version;
drop function default_create_time;
drop function update_time_column;
drop function update_version_column;
drop function immutable_columns;
commit;
`),
},
"migrations/01_domain_types.up.sql": {
name: "01_domain_types.up.sql",
bytes: []byte(`
begin;
create domain wt_public_id as text
check(
length(trim(value)) > 10
);
comment on domain wt_public_id is
'Random ID generated with github.com/hashicorp/vault/sdk/helper/base62';
create domain wt_private_id as text
not null
check(
length(trim(value)) > 10
);
comment on domain wt_private_id is
'Random ID generated with github.com/hashicorp/vault/sdk/helper/base62';
create domain wt_scope_id as text
check(
length(trim(value)) > 10 or value = 'global'
);
comment on domain wt_scope_id is
'"global" or random ID generated with github.com/hashicorp/vault/sdk/helper/base62';
create domain wt_user_id as text
not null
check(
length(trim(value)) > 10 or value = 'u_anon' or value = 'u_auth' or value = 'u_recovery'
);
comment on domain wt_scope_id is
'"u_anon", "u_auth", or random ID generated with github.com/hashicorp/vault/sdk/helper/base62';
create domain wt_role_id as text
not null
check(
length(trim(value)) > 10
);
comment on domain wt_scope_id is
'Random ID generated with github.com/hashicorp/vault/sdk/helper/base62';
create domain wt_timestamp as
timestamp with time zone
default current_timestamp;
comment on domain wt_timestamp is
'Standard timestamp for all create_time and update_time columns';
create or replace function
update_time_column()
returns trigger
as $$
begin
if row(new.*) is distinct from row(old.*) then
new.update_time = now();
return new;
else
return old;
end if;
end;
$$ language plpgsql;
comment on function
update_time_column()
is
'function used in before update triggers to properly set update_time columns';
create or replace function
default_create_time()
returns trigger
as $$
begin
if new.create_time is distinct from now() then
raise warning 'create_time cannot be set to %', new.create_time;
new.create_time = now();
end if;
return new;
end;
$$ language plpgsql;
comment on function
default_create_time()
is
'function used in before insert triggers to set create_time column to now';
create domain wt_version as bigint
default 1
not null
check(
value > 0
);
comment on domain wt_version is
'standard column for row version';
-- update_version_column() will increment the version column whenever row data
-- is updated and should only be used in an update after trigger. This function
-- will overwrite any explicit updates to the version column. The function
-- accepts an optional parameter of 'private_id' for the tables primary key.
create or replace function
update_version_column()
returns trigger
as $$
begin
if pg_trigger_depth() = 1 then
if row(new.*) is distinct from row(old.*) then
if tg_nargs = 0 then
execute format('update %I set version = $1 where public_id = $2', tg_relid::regclass) using old.version+1, new.public_id;
new.version = old.version + 1;
return new;
end if;
if tg_argv[0] = 'private_id' then
execute format('update %I set version = $1 where private_id = $2', tg_relid::regclass) using old.version+1, new.private_id;
new.version = old.version + 1;
return new;
end if;
end if;
end if;
return new;
end;
$$ language plpgsql;
comment on function
update_version_column()
is
'function used in after update triggers to properly set version columns';
-- immutable_columns() will make the column names immutable which are passed as
-- parameters when the trigger is created. It raises error code 23601 which is a
-- class 23 integrity constraint violation: immutable column
create or replace function
immutable_columns()
returns trigger
as $$
declare
col_name text;
new_value text;
old_value text;
begin
foreach col_name in array tg_argv loop
execute format('SELECT $1.%I', col_name) into new_value using new;
execute format('SELECT $1.%I', col_name) into old_value using old;
if new_value is distinct from old_value then
raise exception 'immutable column: %.%', tg_table_name, col_name using
errcode = '23601',
schema = tg_table_schema,
table = tg_table_name,
column = col_name;
end if;
end loop;
return new;
end;
$$ language plpgsql;
comment on function
immutable_columns()
is
'function used in before update triggers to make columns immutable';
commit;
`),
},
"migrations/02_oplog.down.sql": {
name: "02_oplog.down.sql",
bytes: []byte(`
begin;
drop table oplog_metadata cascade;
drop table oplog_ticket cascade;
drop table oplog_entry cascade;
commit;
`),
},
"migrations/02_oplog.up.sql": {
name: "02_oplog.up.sql",
bytes: []byte(`
begin;
-- TODO (jimlambrt 7/2020) remove update_time
create table if not exists oplog_entry (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
version text not null,
aggregate_name text not null,
"data" bytea not null
);
create trigger
update_time_column
before
update on oplog_entry
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on oplog_entry
for each row execute procedure default_create_time();
-- oplog_entry is immutable.
create trigger
immutable_columns
before
update on oplog_entry
for each row execute procedure immutable_columns('id','update_time','create_time','version','aggregate_name', 'data');
create table if not exists oplog_ticket (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
"name" text not null unique,
"version" bigint not null
);
create trigger
update_time_column
before
update on oplog_ticket
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on oplog_ticket
for each row execute procedure default_create_time();
-- oplog_ticket: only allow updates to: version and update_time
create trigger
immutable_columns
before
update on oplog_ticket
for each row execute procedure immutable_columns('id','create_time','name');
-- TODO (jimlambrt 7/2020) remove update_time
create table if not exists oplog_metadata (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
entry_id bigint not null references oplog_entry(id) on delete cascade on update cascade,
"key" text not null,
value text null
);
create trigger
update_time_column
before
update on oplog_metadata
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on oplog_metadata
for each row execute procedure default_create_time();
-- oplog_metadata is immutable
create trigger
immutable_columns
before
update on oplog_metadata
for each row execute procedure immutable_columns('id','create_time','update_time','entry_id','key','value');
create index if not exists idx_oplog_metatadata_key on oplog_metadata(key);
create index if not exists idx_oplog_metatadata_value on oplog_metadata(value);
insert into oplog_ticket (name, version)
values
('auth_token', 1),
('default', 1),
('iam_scope', 1),
('iam_user', 1),
('iam_group', 1),
('iam_group_member', 1),
('iam_role', 1),
('iam_role_grant', 1),
('iam_group_role', 1),
('iam_user_role', 1),
('db_test_user', 1),
('db_test_car', 1),
('db_test_rental', 1),
('db_test_scooter', 1),
('auth_account', 1),
('iam_principal_role', 1);
commit;
`),
},
"migrations/03_db.down.sql": {
name: "03_db.down.sql",
bytes: []byte(`
begin;
drop table db_test_rental cascade;
drop table db_test_car cascade;
drop table db_test_user cascade;
drop table db_test_scooter cascade;
commit;
`),
},
"migrations/03_db.up.sql": {
name: "03_db.up.sql",
bytes: []byte(`
begin;
-- create test tables used in the unit tests for the internal/db package
-- these tables (db_test_user, db_test_car, db_test_rental, db_test_scooter) are
-- not part of the boundary domain model... they are simply used for testing
-- the internal/db package
create table if not exists db_test_user (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
public_id text not null unique,
name text unique,
phone_number text,
email text,
version wt_version
);
create trigger
update_time_column
before
update on db_test_user
for each row execute procedure update_time_column();
-- define the immutable fields for db_test_user
create trigger
immutable_columns
before
update on db_test_user
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on db_test_user
for each row execute procedure default_create_time();
create trigger
update_version_column
after update on db_test_user
for each row execute procedure update_version_column();
create table if not exists db_test_car (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
public_id text not null unique,
name text unique,
model text,
mpg smallint
);
create trigger
update_time_column
before
update on db_test_car
for each row execute procedure update_time_column();
-- define the immutable fields for db_test_car
create trigger
immutable_columns
before
update on db_test_car
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on db_test_car
for each row execute procedure default_create_time();
create table if not exists db_test_rental (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
public_id text not null unique,
name text unique,
user_id bigint not null references db_test_user(id),
car_id bigint not null references db_test_car(id)
);
create trigger
update_time_column
before
update on db_test_rental
for each row execute procedure update_time_column();
-- define the immutable fields for db_test_rental
create trigger
immutable_columns
before
update on db_test_rental
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on db_test_rental
for each row execute procedure default_create_time();
create table if not exists db_test_scooter (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
private_id text not null unique,
name text unique,
model text,
mpg smallint
);
create trigger
update_time_column
before
update on db_test_scooter
for each row execute procedure update_time_column();
-- define the immutable fields for db_test_scooter
create trigger
immutable_columns
before
update on db_test_scooter
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on db_test_scooter
for each row execute procedure default_create_time();
commit;
`),
},
"migrations/06_iam.down.sql": {
name: "06_iam.down.sql",
bytes: []byte(`
BEGIN;
drop table iam_group cascade;
drop table iam_user cascade;
drop table iam_scope_project cascade;
drop table iam_scope_org cascade;
drop table iam_scope_global cascade;
drop table iam_scope cascade;
drop table iam_scope_type_enm cascade;
drop table iam_role cascade;
drop view iam_principal_role cascade;
drop table iam_group_role cascade;
drop table iam_user_role cascade;
drop table iam_group_member_user cascade;
drop view iam_group_member cascade;
drop table iam_role_grant cascade;
drop function iam_sub_names cascade;
drop function iam_immutable_scope_type_func cascade;
drop function iam_sub_scopes_func cascade;
drop function iam_immutable_role_principal cascade;
drop function iam_user_role_scope_check cascade;
drop function iam_group_role_scope_check cascade;
drop function iam_group_member_scope_check cascade;
drop function iam_immutable_group_member cascade;
drop function get_scoped_member_id cascade;
drop function grant_scope_id_valid cascade;
drop function disallow_global_scope_deletion cascade;
drop function user_scope_id_valid cascade;
drop function iam_immutable_role_grant cascade;
drop function disallow_iam_predefined_user_deletion cascade;
drop function recovery_user_not_allowed cascade;
COMMIT;
`),
},
"migrations/06_iam.up.sql": {
name: "06_iam.up.sql",
bytes: []byte(`
begin;
create table iam_scope_type_enm (
string text not null primary key
constraint only_predefined_scope_types_allowed
check(string in ('unknown', 'global', 'org', 'project'))
);
insert into iam_scope_type_enm (string)
values
('unknown'),
('global'),
('org'),
('project');
-- define the immutable fields of iam_scope_type_enm
create trigger
immutable_columns
before
update on iam_scope_type_enm
for each row execute procedure immutable_columns('string');
create table iam_scope (
public_id wt_scope_id primary key,
create_time wt_timestamp,
update_time wt_timestamp,
name text,
type text not null
references iam_scope_type_enm(string)
constraint only_known_scope_types_allowed
check(
(
type = 'global'
and parent_id is null
)
or (
type = 'org'
and parent_id = 'global'
)
or (
type = 'project'
and parent_id is not null
and parent_id != 'global'
)
),
description text,
parent_id text references iam_scope(public_id) on delete cascade on update cascade,
-- version allows optimistic locking of the role when modifying the role
-- itself and when modifying dependent items like principal roles.
version wt_version
);
create table iam_scope_global (
scope_id wt_scope_id primary key
references iam_scope(public_id)
on delete cascade
on update cascade
constraint only_one_global_scope_allowed
check(
scope_id = 'global'
),
name text unique
);
create table iam_scope_org (
scope_id wt_scope_id primary key
references iam_scope(public_id)
on delete cascade
on update cascade,
parent_id wt_scope_id
not null
references iam_scope_global(scope_id)
on delete cascade
on update cascade,
name text,
unique (parent_id, name)
);
create table iam_scope_project (
scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
parent_id wt_public_id not null references iam_scope_org(scope_id) on delete cascade on update cascade,
name text,
unique(parent_id, name),
unique(scope_id),
primary key(scope_id, parent_id)
);
create or replace function
iam_sub_scopes_func()
returns trigger
as $$
declare parent_type int;
begin
if new.type = 'global' then
insert into iam_scope_global (scope_id, name)
values
(new.public_id, new.name);
return new;
end if;
if new.type = 'org' then
insert into iam_scope_org (scope_id, parent_id, name)
values
(new.public_id, new.parent_id, new.name);
return new;
end if;
if new.type = 'project' then
insert into iam_scope_project (scope_id, parent_id, name)
values
(new.public_id, new.parent_id, new.name);
return new;
end if;
raise exception 'unknown scope type';
end;
$$ language plpgsql;
create trigger
iam_scope_insert
after
insert on iam_scope
for each row execute procedure iam_sub_scopes_func();
create or replace function
disallow_global_scope_deletion()
returns trigger
as $$
begin
if old.type = 'global' then
raise exception 'deletion of global scope not allowed';
end if;
return old;
end;
$$ language plpgsql;
create trigger
iam_scope_disallow_global_deletion
before
delete on iam_scope
for each row execute procedure disallow_global_scope_deletion();
create trigger
update_time_column
before update on iam_scope
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on iam_scope
for each row execute procedure default_create_time();
create trigger
update_version_column
after update on iam_scope
for each row execute procedure update_version_column();
-- define the immutable fields for iam_scope
create trigger
immutable_columns
before
update on iam_scope
for each row execute procedure immutable_columns('public_id', 'create_time', 'type', 'parent_id');
-- define the immutable fields of iam_scope_global
create trigger
immutable_columns
before
update on iam_scope_global
for each row execute procedure immutable_columns('scope_id');
-- define the immutable fields of iam_scope_org
create trigger
immutable_columns
before
update on iam_scope_org
for each row execute procedure immutable_columns('scope_id');
-- define the immutable fields of iam_scope_project
create trigger
immutable_columns
before
update on iam_scope_project
for each row execute procedure immutable_columns('scope_id');
-- iam_sub_names will allow us to enforce the different name constraints for
-- orgs and projects via a before update trigger on the iam_scope
-- table.
create or replace function
iam_sub_names()
returns trigger
as $$
begin
if new.name != old.name then
if new.type = 'global' then
update iam_scope_global set name = new.name where scope_id = old.public_id;
return new;
end if;
if new.type = 'org' then
update iam_scope_org set name = new.name where scope_id = old.public_id;
return new;
end if;
if new.type = 'project' then
update iam_scope_project set name = new.name where scope_id = old.public_id;
return new;
end if;
raise exception 'unknown scope type';
end if;
return new;
end;
$$ language plpgsql;
create trigger
iam_sub_names
before
update on iam_scope
for each row execute procedure iam_sub_names();
insert into iam_scope (public_id, name, type, description)
values ('global', 'global', 'global', 'Global Scope');
create table iam_user (
public_id wt_user_id
primary key,
create_time wt_timestamp,
update_time wt_timestamp,
name text,
description text,
scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
unique(name, scope_id),
version wt_version,
-- The order of columns is important for performance. See:
-- https://dba.stackexchange.com/questions/58970/enforcing-constraints-two-tables-away/58972#58972
-- https://dba.stackexchange.com/questions/27481/is-a-composite-index-also-good-for-queries-on-the-first-field
unique(scope_id, public_id)
);
create or replace function
user_scope_id_valid()
returns trigger
as $$
begin
perform from iam_scope where public_id = new.scope_id and type in ('global', 'org');
if not found then
raise exception 'invalid scope type for user creation';
end if;
return new;
end;
$$ language plpgsql;
create or replace function
grant_scope_id_valid()
returns trigger
as $$
declare parent_scope_id text;
declare role_scope_type text;
begin
-- There is a not-null constraint so ensure that if the value passed in is
-- empty we simply set to the scope ID
if new.grant_scope_id = '' or new.grant_scope_id is null then
new.grant_scope_id = new.scope_id;
end if;
-- If the scopes match, it's allowed
if new.grant_scope_id = new.scope_id then
return new;
end if;
-- Fetch the type of scope
select isc.type from iam_scope isc where isc.public_id = new.scope_id into role_scope_type;
-- Always allowed
if role_scope_type = 'global' then
return new;
end if;
-- Never allowed; the case where it's set to the same scope ID as the project
-- itself is covered above
if role_scope_type = 'project' then
raise exception 'invalid to set grant_scope_id to non-same scope_id when role scope type is project';
end if;
if role_scope_type = 'org' then
-- Look up the parent scope ID for the scope ID given
select isc.parent_id from iam_scope isc where isc.public_id = new.grant_scope_id into parent_scope_id;
-- Allow iff the grant scope ID's parent matches the role's scope ID; that
-- is, match if the role belongs to a direct child scope of this
-- org
if parent_scope_id = new.scope_id then
return new;
end if;
raise exception 'grant_scope_id is not a child project of the role scope';
end if;
raise exception 'unknown scope type';
end;
$$ language plpgsql;
create or replace function
disallow_iam_predefined_user_deletion()
returns trigger
as $$
begin
if old.public_id = 'u_anon' then
raise exception 'deletion of anonymous user not allowed';
end if;
if old.public_id = 'u_auth' then
raise exception 'deletion of authenticated user not allowed';
end if;
if old.public_id = 'u_recovery' then
raise exception 'deletion of recovery user not allowed';
end if;
return old;
end;
$$ language plpgsql;
create trigger
update_version_column
after update on iam_user
for each row execute procedure update_version_column();
create trigger
ensure_user_scope_id_valid
before
insert or update on iam_user
for each row execute procedure user_scope_id_valid();
create trigger
update_time_column
before update on iam_user
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on iam_user
for each row execute procedure default_create_time();
create trigger
iam_user_disallow_predefined_user_deletion
before
delete on iam_user
for each row execute procedure disallow_iam_predefined_user_deletion();
-- TODO: Do we want to disallow changing the name or description?
insert into iam_user (public_id, name, description, scope_id)
values ('u_anon', 'anonymous', 'The anonymous user matches any request, whether authenticated or not', 'global');
insert into iam_user (public_id, name, description, scope_id)
values ('u_auth', 'authenticated', 'The authenticated user matches any user that has a valid token', 'global');
insert into iam_user (public_id, name, description, scope_id)
values ('u_recovery', 'recovery', 'The recovery user is used for any request that was performed with the recovery KMS workflow', 'global');
-- define the immutable fields for iam_user
create trigger
immutable_columns
before
update on iam_user
for each row execute procedure immutable_columns('public_id', 'create_time', 'scope_id');
create table iam_role (
public_id wt_role_id primary key,
create_time wt_timestamp,
update_time wt_timestamp,
name text,
description text,
scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
grant_scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
unique(name, scope_id),
version wt_version,
-- add unique index so a composite fk can be declared.
unique(scope_id, public_id)
);
-- Grants are immutable, which is enforced via the trigger below
create table iam_role_grant (
create_time wt_timestamp,
role_id wt_role_id -- pk
references iam_role(public_id)
on delete cascade
on update cascade,
canonical_grant text -- pk
constraint canonical_grant_must_not_be_empty
check(
length(trim(canonical_grant)) > 0
),
raw_grant text not null
constraint raw_grant_must_not_be_empty
check(
length(trim(raw_grant)) > 0
),
primary key(role_id, canonical_grant)
);
-- iam_immutable_role_grant() ensures that grants assigned to roles are immutable.
create or replace function
iam_immutable_role_grant()
returns trigger
as $$
begin
raise exception 'role grants are immutable';
end;
$$ language plpgsql;
create trigger immutable_role_grant
before
update on iam_role_grant
for each row execute procedure iam_immutable_role_grant();
create trigger
default_create_time_column
before
insert on iam_role_grant
for each row execute procedure default_create_time();
create trigger
update_version_column
after update on iam_role
for each row execute procedure update_version_column();
create trigger
update_time_column
before update on iam_role
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on iam_role
for each row execute procedure default_create_time();
create trigger
ensure_grant_scope_id_valid
before
insert or update on iam_role
for each row execute procedure grant_scope_id_valid();
-- define the immutable fields for iam_role (started trigger name with "a_" so
-- it will run first)
create trigger
a_immutable_columns
before
update on iam_role
for each row execute procedure immutable_columns('public_id', 'create_time', 'scope_id');
create or replace function
recovery_user_not_allowed()
returns trigger
as $$
declare
new_value text;
begin
execute format('SELECT $1.%I', tg_argv[0]) into new_value using new;
if new_value = 'u_recovery' then
raise exception '"u_recovery" not allowed here"';
end if;
return new;
end;
$$ language plpgsql;
create table iam_group (
public_id wt_public_id
primary key,
create_time wt_timestamp,
update_time wt_timestamp,
name text,
description text,
scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
unique(name, scope_id),
-- version allows optimistic locking of the group when modifying the group
-- itself and when modifying dependent items like group members.
version wt_version,
-- add unique index so a composite fk can be declared.
unique(scope_id, public_id)
);
create trigger
update_version_column
after update on iam_group
for each row execute procedure update_version_column();
create trigger
update_time_column
before update on iam_group
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on iam_group
for each row execute procedure default_create_time();
-- define the immutable fields for iam_group
create trigger
immutable_columns
before
update on iam_group
for each row execute procedure immutable_columns('public_id', 'create_time', 'scope_id');
-- iam_user_role contains roles that have been assigned to users. Users can be
-- from any scope. The rows in this table must be immutable after insert, which
-- will be ensured with a before update trigger using
-- iam_immutable_role_principal().
create table iam_user_role (
create_time wt_timestamp,
role_id wt_role_id
references iam_role(public_id)
on delete cascade
on update cascade,
principal_id wt_user_id
references iam_user(public_id)
on delete cascade
on update cascade,
primary key (role_id, principal_id)
);
-- iam_group_role contains roles that have been assigned to groups.
-- Groups can be from any scope. The rows in this table must be immutable after
-- insert, which will be ensured with a before update trigger using
-- iam_immutable_role_principal().
create table iam_group_role (
create_time wt_timestamp,
role_id wt_role_id
references iam_role(public_id)
on delete cascade
on update cascade,
principal_id wt_public_id
references iam_group(public_id)
on delete cascade
on update cascade,
primary key (role_id, principal_id)
);
-- get_scoped_principal_id is used by the iam_principle_role view as a convient
-- way to create <scope_id>:<principal_id> to reference principals from
-- other scopes than the role's scope.
create or replace function get_scoped_principal_id(role_scope text, principal_scope text, principal_id text) returns text
as $$
begin
if role_scope = principal_scope then
return principal_id;
end if;
return principal_scope || ':' || principal_id;
end;
$$ language plpgsql;
-- iam_principle_role provides a consolidated view all principal roles assigned
-- (user and group roles).
create view iam_principal_role as
select
ur.create_time,
ur.principal_id,
ur.role_id,
u.scope_id as principal_scope_id,
r.scope_id as role_scope_id,
get_scoped_principal_id(r.scope_id, u.scope_id, ur.principal_id) as scoped_principal_id,
'user' as type
from
iam_user_role ur,
iam_role r,
iam_user u
where
ur.role_id = r.public_id and
u.public_id = ur.principal_id
union
select
gr.create_time,
gr.principal_id,
gr.role_id,
g.scope_id as principal_scope_id,
r.scope_id as role_scope_id,
get_scoped_principal_id(r.scope_id, g.scope_id, gr.principal_id) as scoped_principal_id,
'group' as type
from
iam_group_role gr,
iam_role r,
iam_group g
where
gr.role_id = r.public_id and
g.public_id = gr.principal_id;
-- iam_immutable_role_principal() ensures that roles assigned to principals are immutable.
create or replace function
iam_immutable_role_principal()
returns trigger
as $$
begin
raise exception 'roles are immutable';
end;
$$ language plpgsql;
create trigger immutable_role_principal
before
update on iam_user_role
for each row execute procedure iam_immutable_role_principal();
create trigger
recovery_user_not_allowed_user_role
before
insert on iam_user_role
for each row execute procedure recovery_user_not_allowed('principal_id');
create trigger
default_create_time_column
before
insert on iam_user_role
for each row execute procedure default_create_time();
create trigger immutable_role_principal
before
update on iam_group_role
for each row execute procedure iam_immutable_role_principal();
create trigger
default_create_time_column
before
insert on iam_group_role
for each row execute procedure default_create_time();
-- iam_group_member_user is an association table that represents groups with
-- associated users.
create table iam_group_member_user (
create_time wt_timestamp,
group_id wt_public_id references iam_group(public_id) on delete cascade on update cascade,
member_id wt_user_id references iam_user(public_id) on delete cascade on update cascade,
primary key (group_id, member_id)
);
-- iam_immutable_group_member() ensures that group members are immutable.
create or replace function
iam_immutable_group_member()
returns trigger
as $$
begin
raise exception 'group members are immutable';
end;
$$ language plpgsql;
create trigger
default_create_time_column
before
insert on iam_group_member_user
for each row execute procedure default_create_time();
create trigger iam_immutable_group_member
before
update on iam_group_member_user
for each row execute procedure iam_immutable_group_member();
create trigger
recovery_user_not_allowed_group_member
before
insert on iam_group_member_user
for each row execute procedure recovery_user_not_allowed('member_id');
-- get_scoped_principal_id is used by the iam_group_member view as a convient
-- way to create <scope_id>:<member_id> to reference members from
-- other scopes than the group's scope.
create or replace function get_scoped_member_id(group_scope text, member_scope text, member_id text) returns text
as $$
begin
if group_scope = member_scope then
return member_id;
end if;
return member_scope || ':' || member_id;
end;
$$ language plpgsql;
-- iam_group_member provides a consolidated view of group members.
create view iam_group_member as
select
gm.create_time,
gm.group_id,
gm.member_id,
u.scope_id as member_scope_id,
g.scope_id as group_scope_id,
get_scoped_member_id(g.scope_id, u.scope_id, gm.member_id) as scoped_member_id,
'user' as type
from
iam_group_member_user gm,
iam_user u,
iam_group g
where
gm.member_id = u.public_id and
gm.group_id = g.public_id;
commit;
`),
},
"migrations/07_auth.down.sql": {
name: "07_auth.down.sql",
bytes: []byte(`
begin;
drop function update_iam_user_auth_account;
drop function insert_auth_account_subtype;
drop function insert_auth_method_subtype;
drop table auth_account cascade;
drop table auth_method cascade;
commit;
`),
},
"migrations/07_auth.up.sql": {
name: "07_auth.up.sql",
bytes: []byte(`
begin;
/*
┌────────────────┐ ┌────────────────┐
│ iam_scope │ │ auth_method │
├────────────────┤ ├────────────────┤
│ public_id (pk) │ ╱│ public_id (pk) │
│ │┼┼───────────○─│ scope_id (fk) │
│ │ ╲│ │
└────────────────┘ └────────────────┘
┼ ┼
┼ ┼
│ │
│ │ ▲fk1
│ │
○ ○
╱│╲ ╱│╲
┌────────────────┐ ┌──────────────────────────┐
│ iam_user │ │ auth_account │
├────────────────┤ ├──────────────────────────┤
│ public_id (pk) │ │ public_id (pk) │
│ scope_id (fk) │ ◀fk2 │ scope_id (fk1) │
│ │┼○──────○┼│ auth_method_id (fk1) │
│ │ │ iam_user_scope_id (fk2) │
└────────────────┘ │ iam_user_id (fk2) │
└──────────────────────────┘
An iam_scope can have 0 to many iam_users.
An iam_scope can have 0 to many auth_methods.
An iam_user belongs to 1 iam_scope.
An auth_method belongs to 1 iam_scope.
An iam_user can have 0 or 1 auth_account.
An auth_account belongs to 0 or 1 iam_user.
An auth_method can have 0 to many auth_accounts.
An auth_account belongs to 1 auth_account.
An auth_account can only be associated with an iam_user in the same scope of
the auth_account's auth_method. Including scope_id in fk1 and fk2 ensures this
restriction is not violated.
Design influenced by:
https://dba.stackexchange.com/questions/58970/enforcing-constraints-two-tables-away/58972#58972
*/
-- base table for auth methods
create table auth_method (
public_id wt_public_id
primary key,
scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
-- The order of columns is important for performance. See:
-- https://dba.stackexchange.com/questions/58970/enforcing-constraints-two-tables-away/58972#58972
-- https://dba.stackexchange.com/questions/27481/is-a-composite-index-also-good-for-queries-on-the-first-field
unique(scope_id, public_id)
);
-- base table for auth accounts
create table auth_account (
public_id wt_public_id
primary key,
auth_method_id wt_public_id
not null,
scope_id wt_scope_id
not null,
iam_user_id wt_public_id,
-- The auth_account can only be assigned to an iam_user in the same scope as
-- the auth_method the auth_account belongs to. A separate column for
-- iam_user's scope id is needed because using the scope_id column in the
-- foreign key constraint causes an error when the iam_user is deleted but
-- the auth_account still exists. This is a valid scenario since the
-- lifetime of the auth_account is tied to the auth_method not the iam_user.
iam_user_scope_id wt_scope_id,
constraint user_and_auth_account_in_same_scope
check(
(iam_user_id is null and iam_user_scope_id is null)
or
(iam_user_id is not null and (iam_user_scope_id = scope_id))
),
-- including scope_id in fk1 and fk2 ensures the scope_id of the owning
-- auth_method and the scope_id of the owning iam_user are the same
foreign key (scope_id, auth_method_id) -- fk1
references auth_method (scope_id, public_id)
on delete cascade
on update cascade,
foreign key (iam_user_scope_id, iam_user_id) -- fk2
references iam_user (scope_id, public_id)
on delete set null
on update cascade,
unique(scope_id, auth_method_id, public_id)
);
create or replace function
insert_auth_method_subtype()
returns trigger
as $$
begin
insert into auth_method
(public_id, scope_id)
values
(new.public_id, new.scope_id);
return new;
end;
$$ language plpgsql;
create or replace function
insert_auth_account_subtype()
returns trigger
as $$
begin
select auth_method.scope_id
into new.scope_id
from auth_method
where auth_method.public_id = new.auth_method_id;
insert into auth_account
(public_id, auth_method_id, scope_id)
values
(new.public_id, new.auth_method_id, new.scope_id);
return new;
end;
$$ language plpgsql;
-- update_iam_user_auth_account is a before update trigger on the auth_account
-- table. If the new.iam_user_id column is different from the old.iam_user_id
-- column, update_iam_user_auth_account retrieves the scope id of the iam user
-- and sets new.iam_user_scope_id to that value. If the new.iam_user_id column
-- is null and the old.iam_user_id column is not null,
-- update_iam_user_auth_account sets the iam_user_scope_id to null.
create or replace function
update_iam_user_auth_account()
returns trigger
as $$
begin
if new.iam_user_id is distinct from old.iam_user_id then
if new.iam_user_id is null then
new.iam_user_scope_id = null;
else
select iam_user.scope_id into new.iam_user_scope_id
from iam_user
where iam_user.public_id = new.iam_user_id;
end if;
end if;
return new;
end;
$$ language plpgsql;
create trigger update_iam_user_auth_account
before update of iam_user_id on auth_account
for each row
execute procedure update_iam_user_auth_account();
commit;
`),
},
"migrations/08_servers.down.sql": {
name: "08_servers.down.sql",
bytes: []byte(`
begin;
drop table server;
commit;
`),
},
"migrations/08_servers.up.sql": {
name: "08_servers.up.sql",
bytes: []byte(`
begin;
-- For now at least the IDs will be the same as the name, because this allows us
-- to not have to persist some generated ID to worker and controller nodes.
-- Eventually we may want them to diverge, so we have both here for now.
create table server (
private_id text,
type text,
name text not null unique
constraint server_name_must_not_be_empty
check(length(trim(name)) > 0),
description text,
address text,
create_time wt_timestamp,
update_time wt_timestamp,
primary key (private_id, type)
);
create trigger
immutable_columns
before
update on server
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on server
for each row execute procedure default_create_time();
create table recovery_nonces (
nonce text
primary key,
create_time wt_timestamp
);
create trigger
default_create_time_column
before
insert on recovery_nonces
for each row execute procedure default_create_time();
create trigger
immutable_columns
before
update on recovery_nonces
for each row execute procedure immutable_columns('nonce', 'create_time');
commit;
`),
},
"migrations/11_auth_token.down.sql": {
name: "11_auth_token.down.sql",
bytes: []byte(`
begin;
drop view auth_token_account cascade;
drop table auth_token cascade;
drop function update_last_access_time cascade;
drop function immutable_auth_token_columns cascade;
drop function expire_time_not_older_than_token cascade;
commit;
`),
},
"migrations/11_auth_token.up.sql": {
name: "11_auth_token.up.sql",
bytes: []byte(`
begin;
-- an auth token belongs to 1 and only 1 auth account
-- an auth account can have 0 to many auth tokens
create table auth_token (
public_id wt_public_id primary key,
token bytea not null unique,
-- TODO: Make key_id a foreign key once we have DEKs
key_id text not null
constraint key_id_must_not_be_empty
check(length(trim(key_id)) > 0),
auth_account_id wt_public_id not null
references auth_account(public_id)
on delete cascade
on update cascade,
create_time wt_timestamp,
update_time wt_timestamp,
-- This column is not updated every time this auth token is accessed.
-- It is updated after X minutes from the last time it was updated on
-- a per row basis.
approximate_last_access_time wt_timestamp
constraint last_access_time_must_not_be_after_expiration_time
check(
approximate_last_access_time <= expiration_time
),
expiration_time wt_timestamp
constraint create_time_must_not_be_after_expiration_time
check(
create_time <= expiration_time
)
);
create view auth_token_account as
select at.public_id,
at.token,
at.auth_account_id,
at.create_time,
at.update_time,
at.approximate_last_access_time,
at.expiration_time,
aa.scope_id,
aa.iam_user_id,
aa.auth_method_id
from auth_token as at
inner join auth_account as aa
on at.auth_account_id = aa.public_id;
create or replace function
update_last_access_time()
returns trigger
as $$
begin
if new.approximate_last_access_time is distinct from old.approximate_last_access_time then
new.approximate_last_access_time = now();
end if;
return new;
end;
$$ language plpgsql;
comment on function
update_last_access_time()
is
'function used in before update triggers to properly set last_access_time columns';
create or replace function
immutable_auth_token_columns()
returns trigger
as $$
begin
if new.auth_account_id is distinct from old.auth_account_id then
raise exception 'auth_account_id is read-only';
end if;
if new.token is distinct from old.token then
raise exception 'token is read-only';
end if;
return new;
end;
$$ language plpgsql;
comment on function
immutable_auth_token_columns()
is
'function used in before update triggers to make specific columns immutable';
-- This allows the expiration to be calculated on the server side and still hold the constraint that
-- the expiration time cant be before the creation time of the auth token.
create or replace function
expire_time_not_older_than_token()
returns trigger
as $$
begin
if new.expiration_time < new.create_time then
new.expiration_time = new.create_time;
end if;
return new;
end;
$$ language plpgsql;
comment on function
expire_time_not_older_than_token()
is
'function used in before insert triggers to ensure expiration time is not older than create time';
create trigger
default_create_time_column
before insert on auth_token
for each row execute procedure default_create_time();
create trigger
expire_time_not_older_than_token
before insert on auth_token
for each row execute procedure expire_time_not_older_than_token();
create trigger
update_time_column
before update on auth_token
for each row execute procedure update_time_column();
create trigger
update_last_access_time
before update on auth_token
for each row execute procedure update_last_access_time();
create trigger
immutable_auth_token_columns
before update on auth_token
for each row execute procedure immutable_auth_token_columns();
create trigger
immutable_columns
before
update on auth_token
for each row execute procedure immutable_columns('public_id', 'auth_account_id', 'create_time');
commit;
`),
},
"migrations/12_auth_password.down.sql": {
name: "12_auth_password.down.sql",
bytes: []byte(`
begin;
drop table auth_password_credential;
drop table auth_password_conf cascade;
drop table if exists auth_password_account;
drop table if exists auth_password_method;
drop function insert_auth_password_credential_subtype;
drop function insert_auth_password_conf_subtype;
commit;
`),
},
"migrations/12_auth_password.up.sql": {
name: "12_auth_password.up.sql",
bytes: []byte(`
begin;
/*
┌────────────────┐ ┌──────────────────────┐ ┌────────────────────────────┐
│ auth_method │ │ auth_password_method │ │ auth_password_conf │
├────────────────┤ ├──────────────────────┤ ├────────────────────────────┤
│ public_id (pk) │ │ public_id (pk,fk) │ ╱│ private_id (pk,fk) │
│ scope_id (fk) │┼┼─────────────○┼│ scope_id (fk) │┼┼─────────○─│ password_method_id (fk) │
│ │ │ ... │ ╲│ │
└────────────────┘ └──────────────────────┘ └────────────────────────────┘
┼ ┼ ┼
┼ ┼ ┼
│ │ │
│ ▲fk1 │ ▲fk1 │ ▲fk1
│ │ │
○ ○ ○
╱│╲ ╱│╲ ╱│╲
┌──────────────────────────┐ ┌──────────────────────────┐ ┌───────────────────────────────┐
│ auth_account │ │ auth_password_account │ │ auth_password_credential │
├──────────────────────────┤ ├──────────────────────────┤ ├───────────────────────────────┤
│ public_id (pk) │ │ public_id (pk,fk2) │ │ private_id (pk) │
│ scope_id (fk1) │ ◀fk2 │ scope_id (fk1,fk2) │ ◀fk2 │ password_method_id (fk1,fk2) │
│ auth_method_id (fk1) │┼┼──────○┼│ auth_method_id (fk1,fk2) │┼┼──────○┼│ password_conf_id (fk1) │
│ iam_user_scope_id (fk2) │ │ ... │ │ password_account_id (fk2) │
│ iam_user_id (fk2) │ └──────────────────────────┘ └───────────────────────────────┘
└──────────────────────────┘
An auth_method is a base type. An auth_password_method is an auth_method
subtype. For every row in auth_password_method there is one row in auth_method
with the same public_id and scope_id.
Similarly, an auth_account is a base type. An auth_password_account is an
auth_account subtype. For every row in auth_password_account there is one row
in auth_account with the same public_id, scope_id, and auth_method_id.
Both auth_password_conf and auth_password_credential are base types. Each
password key derivation function will require a auth_password_conf and
auth_password_credential table.
An auth_method can have 0 or 1 auth_password_method.
An auth_account can have 0 or 1 auth_password_account.
An auth_password_method belongs to 1 auth_method.
An auth_password_method can have 0 to many auth_password_accounts.
An auth_password_method can have 0 to many auth_password_confs.
An auth_password_account belongs to 1 auth_account.
An auth_password_account belongs to 1 auth_password_method.
An auth_password_account can have 0 or 1 auth_password_credential.
An auth_password_conf belongs to 1 auth_password_method.
An auth_password_conf can have 0 to many auth_password_credentials.
An auth_password_credential belongs to 1 auth_password_account.
An auth_password_credential belongs to 1 auth_password_conf.
*/
create table auth_password_method (
public_id wt_public_id
primary key,
scope_id wt_scope_id
not null,
password_conf_id wt_private_id, -- FK to auth_password_conf added below
name text,
description text,
create_time wt_timestamp,
update_time wt_timestamp,
min_login_name_length int
not null
default 3,
min_password_length int
not null
default 8,
version wt_version,
foreign key (scope_id, public_id)
references auth_method (scope_id, public_id)
on delete cascade
on update cascade,
unique(scope_id, name),
unique(scope_id, public_id)
);
create trigger
update_version_column
after update on auth_password_method
for each row execute procedure update_version_column();
create trigger
insert_auth_method_subtype
before insert on auth_password_method
for each row execute procedure insert_auth_method_subtype();
create table auth_password_account (
public_id wt_public_id
primary key,
auth_method_id wt_public_id
not null,
-- NOTE(mgaffney): The scope_id type is not wt_scope_id because the domain
-- check is executed before the insert trigger which retrieves the scope_id
-- causing an insert to fail.
scope_id text not null,
name text,
description text,
create_time wt_timestamp,
update_time wt_timestamp,
login_name text not null
constraint login_name_must_be_lowercase
check(lower(trim(login_name)) = login_name)
constraint login_name_must_not_be_empty
check(length(trim(login_name)) > 0),
version wt_version,
foreign key (scope_id, auth_method_id)
references auth_password_method (scope_id, public_id)
on delete cascade
on update cascade,
foreign key (scope_id, auth_method_id, public_id)
references auth_account (scope_id, auth_method_id, public_id)
on delete cascade
on update cascade,
unique(auth_method_id, name),
unique(auth_method_id, login_name),
unique(auth_method_id, public_id)
);
create trigger
update_version_column
after update on auth_password_account
for each row execute procedure update_version_column();
create trigger
insert_auth_account_subtype
before insert on auth_password_account
for each row execute procedure insert_auth_account_subtype();
create table auth_password_conf (
private_id wt_private_id
primary key,
password_method_id wt_public_id
not null
references auth_password_method (public_id)
on delete cascade
on update cascade
deferrable initially deferred,
unique(password_method_id, private_id)
);
alter table auth_password_method
add constraint current_conf_fkey
foreign key (public_id, password_conf_id)
references auth_password_conf (password_method_id, private_id)
on delete cascade
on update cascade
deferrable initially deferred;
-- insert_auth_password_conf_subtype() is a trigger function for subtypes of
-- auth_password_conf
create or replace function
insert_auth_password_conf_subtype()
returns trigger
as $$
begin
insert into auth_password_conf
(private_id, password_method_id)
values
(new.private_id, new.password_method_id);
return new;
end;
$$ language plpgsql;
create table auth_password_credential (
private_id wt_private_id
primary key,
password_account_id wt_public_id
not null
unique,
password_conf_id wt_private_id,
password_method_id wt_public_id
not null,
foreign key (password_method_id, password_conf_id)
references auth_password_conf (password_method_id, private_id)
on delete cascade
on update cascade,
foreign key (password_method_id, password_account_id)
references auth_password_account (auth_method_id, public_id)
on delete cascade
on update cascade,
unique(password_method_id, password_conf_id, password_account_id)
);
-- insert_auth_password_credential_subtype() is a trigger function for
-- subtypes of auth_password_credential
create or replace function
insert_auth_password_credential_subtype()
returns trigger
as $$
begin
select auth_password_account.auth_method_id
into new.password_method_id
from auth_password_account
where auth_password_account.public_id = new.password_account_id;
insert into auth_password_credential
(private_id, password_account_id, password_conf_id, password_method_id)
values
(new.private_id, new.password_account_id, new.password_conf_id, new.password_method_id);
return new;
end;
$$ language plpgsql;
-- update_auth_password_credential_subtype() is an after update trigger
-- function for subtypes of auth_password_credential
create or replace function
update_auth_password_credential_subtype()
returns trigger
as $$
begin
/*
The configuration id of a credential is updated when a credential is
rehashed during authentication.
*/
if new.password_conf_id is distinct from old.password_conf_id then
update auth_password_credential
set password_conf_id = new.password_conf_id
where private_id = new.private_id;
end if;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- delete_auth_password_credential_subtype() is an after delete trigger
-- function for subtypes of auth_password_credential
create or replace function
delete_auth_password_credential_subtype()
returns trigger
as $$
begin
delete
from auth_password_credential
where private_id = old.private_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
--
-- triggers for time columns
--
create trigger
update_time_column
before
update on auth_password_method
for each row execute procedure update_time_column();
create trigger
immutable_columns
before
update on auth_password_method
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on auth_password_method
for each row execute procedure default_create_time();
create trigger
update_time_column
before
update on auth_password_account
for each row execute procedure update_time_column();
create trigger
immutable_columns
before
update on auth_password_account
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on auth_password_account
for each row execute procedure default_create_time();
-- The tickets for oplog are the subtypes not the base types because no updates
-- are done to any values in the base types.
insert into oplog_ticket
(name, version)
values
('auth_password_method', 1),
('auth_password_account', 1),
('auth_password_credential', 1);
commit;
`),
},
"migrations/13_auth_password_argon.down.sql": {
name: "13_auth_password_argon.down.sql",
bytes: []byte(`
begin;
drop table auth_password_argon2_cred;
drop table auth_password_argon2_conf;
commit;
`),
},
"migrations/13_auth_password_argon.up.sql": {
name: "13_auth_password_argon.up.sql",
bytes: []byte(`
begin;
create table auth_password_argon2_conf (
private_id wt_private_id primary key
references auth_password_conf (private_id)
on delete cascade
on update cascade,
password_method_id wt_public_id not null,
create_time wt_timestamp,
iterations int not null default 3
constraint iterations_must_be_greater_than_0
check(iterations > 0),
memory int not null default 65536
constraint memory_must_be_greater_than_0
check(memory > 0),
threads int not null default 1
constraint threads_must_be_greater_than_0
check(threads > 0),
-- salt_length unit is bytes
salt_length int not null default 32
-- minimum of 16 bytes (128 bits)
constraint salt_must_be_at_least_16_bytes
check(salt_length >= 16),
-- key_length unit is bytes
key_length int not null default 32
-- minimum of 16 bytes (128 bits)
constraint key_length_must_be_at_least_16_bytes
check(key_length >= 16),
unique(password_method_id, iterations, memory, threads, salt_length, key_length),
unique (password_method_id, private_id),
foreign key (password_method_id, private_id)
references auth_password_conf (password_method_id, private_id)
on delete cascade
on update cascade
deferrable initially deferred
);
create or replace function
read_only_auth_password_argon2_conf()
returns trigger
as $$
begin
raise exception 'auth_password_argon2_conf is read-only';
end;
$$ language plpgsql;
create trigger
read_only_auth_password_argon2_conf
before
update on auth_password_argon2_conf
for each row execute procedure read_only_auth_password_argon2_conf();
create trigger
insert_auth_password_conf_subtype
before insert on auth_password_argon2_conf
for each row execute procedure insert_auth_password_conf_subtype();
create table auth_password_argon2_cred (
private_id wt_private_id primary key
references auth_password_credential (private_id)
on delete cascade
on update cascade,
password_account_id wt_public_id not null,
password_conf_id wt_private_id,
-- NOTE(mgaffney): The password_method_id type is not wt_public_id because
-- the domain check is executed before the insert trigger which retrieves
-- the password_method_id causing an insert to fail.
password_method_id text not null,
create_time wt_timestamp,
update_time wt_timestamp,
salt bytea not null -- cannot be changed unless derived_key is changed too
constraint salt_must_not_be_empty
check(length(salt) > 0),
derived_key bytea not null
constraint derived_key_must_not_be_empty
check(length(derived_key) > 0),
-- TODO: Make key_id a foreign key once we have DEKs
key_id text not null
constraint key_id_must_not_be_empty
check(length(trim(key_id)) > 0),
foreign key (password_method_id, password_conf_id)
references auth_password_argon2_conf (password_method_id, private_id)
on delete cascade
on update cascade,
foreign key (password_method_id, password_conf_id, password_account_id)
references auth_password_credential (password_method_id, password_conf_id, password_account_id)
on delete cascade
on update cascade
deferrable initially deferred
);
create trigger
insert_auth_password_credential_subtype
before insert on auth_password_argon2_cred
for each row execute procedure insert_auth_password_credential_subtype();
create trigger
update_auth_password_credential_subtype
after update on auth_password_argon2_cred
for each row execute procedure update_auth_password_credential_subtype();
create trigger
delete_auth_password_credential_subtype
after delete on auth_password_argon2_cred
for each row execute procedure delete_auth_password_credential_subtype();
--
-- triggers for time columns
--
create trigger
immutable_columns
before
update on auth_password_argon2_conf
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on auth_password_argon2_conf
for each row execute procedure default_create_time();
create trigger
update_time_column
before
update on auth_password_argon2_cred
for each row execute procedure update_time_column();
create trigger
immutable_columns
before
update on auth_password_argon2_cred
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on auth_password_argon2_cred
for each row execute procedure default_create_time();
-- The tickets for oplog are the subtypes not the base types because no updates
-- are done to any values in the base types.
insert into oplog_ticket
(name, version)
values
('auth_password_argon2_conf', 1),
('auth_password_argon2_cred', 1);
commit;
`),
},
"migrations/14_auth_password_views.down.sql": {
name: "14_auth_password_views.down.sql",
bytes: []byte(`
begin;
drop view auth_password_current_conf;
drop view auth_password_conf_union;
commit;
`),
},
"migrations/14_auth_password_views.up.sql": {
name: "14_auth_password_views.up.sql",
bytes: []byte(`
begin;
-- auth_password_conf_union is a union of the configuration settings
-- of all supported key derivation functions.
-- It will be updated as new key derivation functions are supported.
create or replace view auth_password_conf_union as
-- Do not change the order of the columns when adding new configurations.
-- Union with new tables appending new columns as needed.
select c.password_method_id, c.private_id as password_conf_id, c.private_id,
'argon2' as conf_type,
c.iterations, c.memory, c.threads, c.salt_length, c.key_length
from auth_password_argon2_conf c;
-- auth_password_current_conf provides a view of the current password
-- configuration for each password auth method.
-- The view will be updated as new key derivation functions are supported
-- but the query to create the view should not need to be updated.
create or replace view auth_password_current_conf as
-- Rerun this query whenever auth_password_conf_union is updated.
select pm.min_login_name_length, pm.min_password_length, c.*
from auth_password_method pm
inner join auth_password_conf_union c
on pm.password_conf_id = c.password_conf_id;
commit;
`),
},
"migrations/20_host.down.sql": {
name: "20_host.down.sql",
bytes: []byte(`
begin;
drop table host_set;
drop table host;
drop table host_catalog;
drop function insert_host_set_subtype;
drop function insert_host_subtype;
drop function insert_host_catalog_subtype;
delete
from oplog_ticket
where name in (
'host_catalog',
'host',
'host_set'
);
commit;
`),
},
"migrations/20_host.up.sql": {
name: "20_host.up.sql",
bytes: []byte(`
begin;
/*
┌─────────────────┐
│ host │
├─────────────────┤
│ public_id (pk) │
│ catalog_id (fk) │
│ │
└─────────────────┘
╲│╱
┌─────────────────┐ ┌─────────────────┐
│ iam_scope │ │ host_catalog │
├─────────────────┤ ├─────────────────┤
│ public_id (pk) │ ╱│ public_id (pk) │
│ │┼┼──────○─│ scope_id (fk) │
│ │ ╲│ │
└─────────────────┘ └─────────────────┘
╱│╲
┌─────────────────┐
│ host_set │
├─────────────────┤
│ public_id (pk) │
│ catalog_id (fk) │
│ │
└─────────────────┘
*/
-- host_catalog
create table host_catalog (
public_id wt_public_id
primary key,
scope_id wt_scope_id
not null
references iam_scope (public_id)
on delete cascade
on update cascade,
-- The order of columns is important for performance. See:
-- https://dba.stackexchange.com/questions/58970/enforcing-constraints-two-tables-away/58972#58972
-- https://dba.stackexchange.com/questions/27481/is-a-composite-index-also-good-for-queries-on-the-first-field
unique(scope_id, public_id)
);
create trigger immutable_columns before update on host_catalog
for each row execute procedure immutable_columns('public_id', 'scope_id');
-- insert_host_catalog_subtype() is a before insert trigger
-- function for subtypes of host_catalog
create or replace function insert_host_catalog_subtype()
returns trigger
as $$
begin
insert into host_catalog
(public_id, scope_id)
values
(new.public_id, new.scope_id);
return new;
end;
$$ language plpgsql;
-- delete_host_catalog_subtype() is an after delete trigger
-- function for subtypes of host_catalog
create or replace function delete_host_catalog_subtype()
returns trigger
as $$
begin
delete from host_catalog
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- host
create table host (
public_id wt_public_id primary key,
catalog_id wt_public_id not null
references host_catalog (public_id)
on delete cascade
on update cascade,
unique(catalog_id, public_id)
);
create trigger immutable_columns before update on host
for each row execute procedure immutable_columns('public_id', 'catalog_id');
-- insert_host_subtype() is a before insert trigger
-- function for subtypes of host
create or replace function insert_host_subtype()
returns trigger
as $$
begin
insert into host
(public_id, catalog_id)
values
(new.public_id, new.catalog_id);
return new;
end;
$$ language plpgsql;
-- delete_host_subtype() is an after delete trigger
-- function for subtypes of host
create or replace function delete_host_subtype()
returns trigger
as $$
begin
delete from host
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- host_set
create table host_set (
public_id wt_public_id primary key,
catalog_id wt_public_id not null
references host_catalog (public_id)
on delete cascade
on update cascade,
unique(catalog_id, public_id)
);
create trigger immutable_columns before update on host_set
for each row execute procedure immutable_columns('public_id', 'catalog_id');
-- insert_host_set_subtype() is a before insert trigger
-- function for subtypes of host_set
create or replace function insert_host_set_subtype()
returns trigger
as $$
begin
insert into host_set
(public_id, catalog_id)
values
(new.public_id, new.catalog_id);
return new;
end;
$$ language plpgsql;
-- delete_host_set_subtype() is an after delete trigger
-- function for subtypes of host_set
create or replace function delete_host_set_subtype()
returns trigger
as $$
begin
delete from host_set
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
insert into oplog_ticket (name, version)
values
('host_catalog', 1),
('host', 1),
('host_set', 1);
commit;
`),
},
"migrations/22_static_host.down.sql": {
name: "22_static_host.down.sql",
bytes: []byte(`
begin;
drop table static_host_set_member cascade;
drop table static_host_set cascade;
drop table static_host cascade;
drop table static_host_catalog cascade;
delete
from oplog_ticket
where name in (
'static_host_catalog',
'static_host',
'static_host_set',
'static_host_set_member'
);
commit;
`),
},
"migrations/22_static_host.up.sql": {
name: "22_static_host.up.sql",
bytes: []byte(`
begin;
/*
┌─────────────────┐ ┌─────────────────────┐
│ host │ │ static_host │
├─────────────────┤ ├─────────────────────┤
│ public_id (pk) │ │ public_id (pk) │
│ catalog_id (fk) │┼┼──────○┼│ catalog_id (fk) │┼┼─────────────────────┐
│ │ │ address │ ◀fk1 │
└─────────────────┘ └─────────────────────┘ │
╲│╱ ╲│╱ │
○ ○ │
│ │ │
┼ ┼ ○
┼ ┼ ╱│╲
┌─────────────────┐ ┌─────────────────────┐ ┌────────────────────────┐
│ host_catalog │ │ static_host_catalog │ │ static_host_set_member │
├─────────────────┤ ├─────────────────────┤ ├────────────────────────┤
│ public_id (pk) │ │ public_id (pk) │ │ host_id (pk,fk1) │
│ scope_id (fk) │┼┼──────○┼│ scope_id (fk) │ │ set_id (pk,fk2) │
│ │ │ │ │ catalog_id (fk1,fk2) │
└─────────────────┘ └─────────────────────┘ └────────────────────────┘
┼ ┼ ╲│╱
┼ ┼ ○
│ │ │
○ ○ │
╱│╲ ╱│╲ │
┌─────────────────┐ ┌─────────────────────┐ │
│ host_set │ │ static_host_set │ │
├─────────────────┤ ├─────────────────────┤ │
│ public_id (pk) │ │ public_id (pk) │ ◀fk2 │
│ catalog_id (fk) │┼┼──────○┼│ catalog_id (fk) │┼┼─────────────────────┘
│ │ │ │
└─────────────────┘ └─────────────────────┘
*/
create table static_host_catalog (
public_id wt_public_id
primary key,
scope_id wt_scope_id
not null
references iam_scope (public_id)
on delete cascade
on update cascade,
name text,
description text,
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
foreign key (scope_id, public_id)
references host_catalog (scope_id, public_id)
on delete cascade
on update cascade,
unique(scope_id, name)
);
create trigger update_version_column after update on static_host_catalog
for each row execute procedure update_version_column();
create trigger update_time_column before update on static_host_catalog
for each row execute procedure update_time_column();
create trigger default_create_time_column before insert on static_host_catalog
for each row execute procedure default_create_time();
create trigger immutable_columns before update on static_host_catalog
for each row execute procedure immutable_columns('public_id', 'scope_id','create_time');
create trigger insert_host_catalog_subtype before insert on static_host_catalog
for each row execute procedure insert_host_catalog_subtype();
create trigger delete_host_catalog_subtype after delete on static_host_catalog
for each row execute procedure delete_host_catalog_subtype();
create table static_host (
public_id wt_public_id primary key,
catalog_id wt_public_id not null
references static_host_catalog (public_id)
on delete cascade
on update cascade,
name text,
description text,
address text not null
constraint address_must_be_more_than_2_characters
check(length(trim(address)) > 2)
constraint address_must_be_less_than_256_characters
check(length(trim(address)) < 256),
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
unique(catalog_id, name),
foreign key (catalog_id, public_id)
references host (catalog_id, public_id)
on delete cascade
on update cascade,
-- The order of columns is important for performance. See:
-- https://dba.stackexchange.com/questions/58970/enforcing-constraints-two-tables-away/58972#58972
-- https://dba.stackexchange.com/questions/27481/is-a-composite-index-also-good-for-queries-on-the-first-field
unique(catalog_id, public_id)
);
create trigger update_version_column after update on static_host
for each row execute procedure update_version_column();
create trigger update_time_column before update on static_host
for each row execute procedure update_time_column();
create trigger default_create_time_column before insert on static_host
for each row execute procedure default_create_time();
create trigger immutable_columns before update on static_host
for each row execute procedure immutable_columns('public_id', 'catalog_id','create_time');
create trigger insert_host_subtype before insert on static_host
for each row execute procedure insert_host_subtype();
create trigger delete_host_subtype after delete on static_host
for each row execute procedure delete_host_subtype();
create table static_host_set (
public_id wt_public_id primary key,
catalog_id wt_public_id not null
references static_host_catalog (public_id)
on delete cascade
on update cascade,
name text,
description text,
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
unique(catalog_id, name),
foreign key (catalog_id, public_id)
references host_set (catalog_id, public_id)
on delete cascade
on update cascade,
unique(catalog_id, public_id)
);
create trigger update_version_column after update on static_host_set
for each row execute procedure update_version_column();
create trigger update_time_column before update on static_host_set
for each row execute procedure update_time_column();
create trigger default_create_time_column before insert on static_host_set
for each row execute procedure default_create_time();
create trigger immutable_columns before update on static_host_set
for each row execute procedure immutable_columns('public_id', 'catalog_id','create_time');
create trigger insert_host_set_subtype before insert on static_host_set
for each row execute procedure insert_host_set_subtype();
create trigger delete_host_set_subtype after delete on static_host_set
for each row execute procedure delete_host_set_subtype();
-- static_host_set_member: membership join table between static hosts and
-- static host sets. catalog_id appears in both fks so a host can only be a
-- member of a set in the same catalog.
create table static_host_set_member (
host_id wt_public_id not null,
set_id wt_public_id not null,
catalog_id wt_public_id not null,
primary key(host_id, set_id),
foreign key (catalog_id, host_id) -- fk1
references static_host (catalog_id, public_id)
on delete cascade
on update cascade,
foreign key (catalog_id, set_id) -- fk2
references static_host_set (catalog_id, public_id)
on delete cascade
on update cascade
);
create trigger immutable_columns before update on static_host_set_member
for each row execute procedure immutable_columns('host_id', 'set_id', 'catalog_id');
-- insert_static_host_set_member() is a before insert trigger function that
-- derives catalog_id from the referenced set, so callers only supply
-- host_id and set_id. If set_id does not exist, catalog_id stays null and
-- the not-null constraint rejects the row.
create or replace function insert_static_host_set_member()
returns trigger
as $$
begin
select static_host_set.catalog_id
into new.catalog_id
from static_host_set
where static_host_set.public_id = new.set_id;
return new;
end;
$$ language plpgsql;
create trigger insert_static_host_set_member before insert on static_host_set_member
for each row execute procedure insert_static_host_set_member();
-- register the static host tables with the oplog ticketing system
insert into oplog_ticket (name, version)
values
('static_host_catalog', 1),
('static_host', 1),
('static_host_set', 1),
('static_host_set_member', 1);
commit;
`),
},
"migrations/30_keys.down.sql": {
name: "30_keys.down.sql",
bytes: []byte(`
begin;
-- remove the shared kms version trigger function; cascade also drops any
-- kms_* table triggers that still reference it.
drop function kms_version_column cascade;
commit;
`),
},
"migrations/30_keys.up.sql": {
name: "30_keys.up.sql",
bytes: []byte(`
begin;
-- kms_version_column() will increment the version column whenever row data
-- is inserted and should only be used in a before insert trigger. This
-- function will overwrite any explicit values to the version column.
-- The trigger must be created with one argument: the name of the column that
-- groups versions (e.g. 'root_key_id'); the new row's version becomes
-- max(version) + 1 within that group, or 1 for the group's first row.
-- NOTE(review): select-max-plus-one is not safe under concurrent inserts for
-- the same key id; presumably key rotation is serialized by the app -- confirm.
create or replace function
kms_version_column()
returns trigger
as $$
declare
_key_id text;
_max bigint;
begin
execute format('SELECT $1.%I', tg_argv[0]) into _key_id using new;
execute format('select max(version) + 1 from %I where %I = $1', tg_relid::regclass, tg_argv[0]) using _key_id into _max;
if _max is null then
_max = 1;
end if;
new.version = _max;
return new;
end;
$$ language plpgsql;
comment on function
kms_version_column()
is
'function used in before insert triggers to properly set version columns for kms_* tables with a version column';
commit;
`),
},
"migrations/31_keys.down.sql": {
name: "31_keys.down.sql",
bytes: []byte(`
begin;
-- Reverse 31_keys.up.sql: drop every kms table that migration creates.
-- bug fix: the original never dropped kms_token_key / kms_token_key_version
-- (both created by the up migration) and never removed the oplog_ticket rows
-- the up migration inserts. Version tables are dropped before their parent
-- key tables; cascade also removes dependent triggers.
drop table kms_token_key_version cascade;
drop table kms_token_key cascade;
drop table kms_session_key_version cascade;
drop table kms_session_key cascade;
drop table kms_oplog_key_version cascade;
drop table kms_oplog_key cascade;
drop table kms_database_key_version cascade;
drop table kms_database_key cascade;
drop table kms_root_key_version cascade;
drop table kms_root_key cascade;
-- remove the oplog tickets added by the up migration
delete
from oplog_ticket
where name in (
'kms_root_key',
'kms_root_key_version'
);
commit;
`),
},
"migrations/31_keys.up.sql": {
name: "31_keys.up.sql",
bytes: []byte(`
begin;
/*
┌────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
├────────────────────────────────────────────────────────────────────────────────────────────┐ ○
├────────────────────────────────────────────────────────────────┐ ○ ┼
├────────────────────────────────────┐ ○ ┼ ┌────────────────────────┐
│ ○ ┼ ┌────────────────────────┐ │ kms_token_key │
┼ ┼ ┌────────────────────────┐ │ kms_session_key │ ├────────────────────────┤
┌────────────────────────┐ ┌────────────────────────┐ │ kms_oplog_key │ ├────────────────────────┤ │private_id │
│ kms_root_key │ │ kms_database_key │ ├────────────────────────┤ │private_id │ │root_key_id │
├────────────────────────┤ ├────────────────────────┤ │private_id │ │root_key_id │ │ │
│private_id │ │private_id │ │root_key_id │ │ │ │ │
│scope_id │ │root_key_id │ │ │ │ │ │ │
│ │ │ │ │ │ │ │ └────────────────────────┘
└────────────────────────┘ └────────────────────────┘ └────────────────────────┘ └────────────────────────┘ ┼
┼ ┼ ┼ ┼ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
┼ ┼ ┼ ┼ │
╱│╲ ╱│╲ ╱│╲ ╱│╲ ┼
┌────────────────────────┐ ┌────────────────────────┐ ┌────────────────────────┐ ┌────────────────────────┐ ╱│╲
│ kms_root_key_version │ │kms_database_key_version│ │ kms_oplog_key_version │ │kms_session_key_version │ ┌────────────────────────┐
├────────────────────────┤ ├────────────────────────┤ ├────────────────────────┤ ├────────────────────────┤ │ kms_token_key_version │
│private_id │ │private_id │ │private_id │ │private_id │ ├────────────────────────┤
│root_key_id │ │database_key_id │ │oplog_key_id │ │session_key_id │ │private_id │
│key │ │root_key_id │ │root_key_id │ │root_key_id │ │token_key_id │
│version │ │key │ │key │ │key │ │root_key_id │
│ │ │version │ │version │ │version │ │key │
└────────────────────────┘ └────────────────────────┘ │ │ │ │ │version │
┼ ┼ └────────────────────────┘ │ │ │ │
│ ○ ┼ └────────────────────────┘ │ │
├────────────────────────────────────┘ ○ ┼ └────────────────────────┘
├────────────────────────────────────────────────────────────────┘ ○ ┼
├────────────────────────────────────────────────────────────────────────────────────────────┘ ○
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
*/
-- kms_root_key: the root key (KEK) for a scope; each scope has at most one,
-- and all per-purpose DEK tables below reference it via root_key_id.
create table kms_root_key (
private_id wt_private_id primary key,
scope_id wt_scope_id not null unique -- there can only be one root key for a scope.
references iam_scope(public_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
-- define the immutable fields for kms_root_key (all of them)
create trigger
immutable_columns
before
update on kms_root_key
for each row execute procedure immutable_columns('private_id', 'scope_id', 'create_time');
create trigger
default_create_time_column
before
insert on kms_root_key
for each row execute procedure default_create_time();
-- kms_root_key_version: versioned key material for a root key; version is
-- assigned by the kms_version_column trigger below (max per root_key_id + 1).
create table kms_root_key_version (
private_id wt_private_id primary key,
root_key_id wt_private_id not null
references kms_root_key(private_id)
on delete cascade
on update cascade,
version wt_version,
key bytea not null,
create_time wt_timestamp,
unique(root_key_id, version)
);
-- define the immutable fields for kms_root_key_version (all of them)
create trigger
immutable_columns
before
update on kms_root_key_version
for each row execute procedure immutable_columns('private_id', 'root_key_id', 'version', 'key', 'create_time');
create trigger
default_create_time_column
before
insert on kms_root_key_version
for each row execute procedure default_create_time();
create trigger
kms_version_column
before insert on kms_root_key_version
for each row execute procedure kms_version_column('root_key_id');
-- kms_database_key: the database-encryption DEK for a root key.
create table kms_database_key (
private_id wt_private_id primary key,
root_key_id wt_private_id not null unique -- there can be only one database dek per root key
references kms_root_key(private_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
-- define the immutable fields for kms_database_key (all of them)
create trigger
immutable_columns
before
update on kms_database_key
for each row execute procedure immutable_columns('private_id', 'root_key_id', 'create_time');
create trigger
default_create_time_column
before
insert on kms_database_key
for each row execute procedure default_create_time();
-- kms_database_key_version: versioned key material for a database DEK; also
-- records which root key version wrapped it.
create table kms_database_key_version (
private_id wt_private_id primary key,
database_key_id wt_private_id not null
references kms_database_key(private_id)
on delete cascade
on update cascade,
root_key_version_id wt_private_id not null
references kms_root_key_version(private_id)
on delete cascade
on update cascade,
version wt_version,
key bytea not null,
create_time wt_timestamp,
unique(database_key_id, version)
);
-- define the immutable fields for kms_database_key_version (all of them)
create trigger
immutable_columns
before
update on kms_database_key_version
for each row execute procedure immutable_columns('private_id', 'database_key_id', 'root_key_version_id', 'version', 'key', 'create_time');
create trigger
default_create_time_column
before
insert on kms_database_key_version
for each row execute procedure default_create_time();
create trigger
kms_version_column
before insert on kms_database_key_version
for each row execute procedure kms_version_column('database_key_id');
-- kms_oplog_key: the oplog-encryption DEK for a root key.
create table kms_oplog_key (
private_id wt_private_id primary key,
root_key_id wt_private_id not null unique -- there can be only one oplog dek per root key
references kms_root_key(private_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
-- define the immutable fields for kms_oplog_key (all of them)
create trigger
immutable_columns
before
update on kms_oplog_key
for each row execute procedure immutable_columns('private_id', 'root_key_id', 'create_time');
create trigger
default_create_time_column
before
insert on kms_oplog_key
for each row execute procedure default_create_time();
-- kms_oplog_key_version: versioned key material for an oplog DEK; also
-- records which root key version wrapped it.
create table kms_oplog_key_version (
private_id wt_private_id primary key,
oplog_key_id wt_private_id not null
references kms_oplog_key(private_id)
on delete cascade
on update cascade,
root_key_version_id wt_private_id not null
references kms_root_key_version(private_id)
on delete cascade
on update cascade,
version wt_version,
key bytea not null,
create_time wt_timestamp,
unique(oplog_key_id, version)
);
-- define the immutable fields for kms_oplog_key_version (all of them)
create trigger
immutable_columns
before
update on kms_oplog_key_version
for each row execute procedure immutable_columns('private_id', 'oplog_key_id', 'root_key_version_id', 'version', 'key', 'create_time');
create trigger
default_create_time_column
before
insert on kms_oplog_key_version
for each row execute procedure default_create_time();
create trigger
kms_version_column
before insert on kms_oplog_key_version
for each row execute procedure kms_version_column('oplog_key_id');
-- kms_session_key: the session-encryption DEK for a root key.
create table kms_session_key (
private_id wt_private_id primary key,
root_key_id wt_private_id not null unique -- there can be only one session dek per root key
references kms_root_key(private_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
-- define the immutable fields for kms_session_key (all of them)
create trigger
immutable_columns
before
update on kms_session_key
for each row execute procedure immutable_columns('private_id', 'root_key_id', 'create_time');
create trigger
default_create_time_column
before
insert on kms_session_key
for each row execute procedure default_create_time();
-- kms_session_key_version: versioned key material for a session DEK; also
-- records which root key version wrapped it.
create table kms_session_key_version (
private_id wt_private_id primary key,
session_key_id wt_private_id not null
references kms_session_key(private_id)
on delete cascade
on update cascade,
root_key_version_id wt_private_id not null
references kms_root_key_version(private_id)
on delete cascade
on update cascade,
version wt_version,
key bytea not null,
create_time wt_timestamp,
unique(session_key_id, version)
);
-- define the immutable fields for kms_session_key_version (all of them)
create trigger
immutable_columns
before
update on kms_session_key_version
for each row execute procedure immutable_columns('private_id', 'session_key_id', 'root_key_version_id', 'version', 'key', 'create_time');
create trigger
default_create_time_column
before
insert on kms_session_key_version
for each row execute procedure default_create_time();
create trigger
kms_version_column
before insert on kms_session_key_version
for each row execute procedure kms_version_column('session_key_id');
-- kms_token_key: the auth-token-encryption DEK for a root key.
create table kms_token_key (
private_id wt_private_id primary key,
root_key_id wt_private_id not null unique -- there can be only one token dek per root key
references kms_root_key(private_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
-- define the immutable fields for kms_token_key (all of them)
create trigger
immutable_columns
before
update on kms_token_key
for each row execute procedure immutable_columns('private_id', 'root_key_id', 'create_time');
create trigger
default_create_time_column
before
insert on kms_token_key
for each row execute procedure default_create_time();
-- kms_token_key_version: versioned key material for a token DEK; also
-- records which root key version wrapped it.
create table kms_token_key_version (
private_id wt_private_id primary key,
token_key_id wt_private_id not null
references kms_token_key(private_id)
on delete cascade
on update cascade,
root_key_version_id wt_private_id not null
references kms_root_key_version(private_id)
on delete cascade
on update cascade,
version wt_version,
key bytea not null,
create_time wt_timestamp,
unique(token_key_id, version)
);
-- define the immutable fields for kms_token_key_version (all of them)
create trigger
immutable_columns
before
update on kms_token_key_version
for each row execute procedure immutable_columns('private_id', 'token_key_id', 'root_key_version_id', 'version', 'key', 'create_time');
create trigger
default_create_time_column
before
insert on kms_token_key_version
for each row execute procedure default_create_time();
create trigger
kms_version_column
before insert on kms_token_key_version
for each row execute procedure kms_version_column('token_key_id');
-- NOTE(review): only the root key tables get oplog tickets here; presumably
-- the DEK tables are not oplog-tracked -- confirm against the oplog usage.
insert into oplog_ticket
(name, version)
values
('kms_root_key', 1),
('kms_root_key_version', 1);
commit;
commit;
`),
},
"migrations/40_targets.down.sql": {
name: "40_targets.down.sql",
bytes: []byte(`
begin;
-- Reverse 40_targets.up.sql by dropping the target trigger functions.
drop function insert_target_subtype;
drop function delete_target_subtype;
drop function target_scope_valid;
-- bug fix: this statement was missing its terminating semicolon, so it and
-- the following "commit" parsed as a single invalid statement and the
-- down migration failed.
drop function target_host_set_scope_valid;
commit;
`),
},
"migrations/40_targets.up.sql": {
name: "40_targets.up.sql",
bytes: []byte(`
begin;
-- insert_target_subtype() is a before insert trigger
-- function for subtypes of target: it creates the matching
-- base-type row in the target table.
create or replace function
insert_target_subtype()
returns trigger
as $$
begin
insert into target
(public_id, scope_id)
values
(new.public_id, new.scope_id);
return new;
end;
$$ language plpgsql;
-- delete_target_subtype() is an after delete trigger
-- function for subtypes of target: it removes the matching
-- base-type row from the target table.
create or replace function delete_target_subtype()
returns trigger
as $$
begin
delete from target
where
public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- target_scope_valid() is a before insert trigger function for target.
-- Targets may only be created in project scopes; any other scope type
-- (global, org, or a nonexistent scope, which yields null) is rejected.
create or replace function
target_scope_valid()
returns trigger
as $$
declare scope_type text;
begin
-- Fetch the type of scope
select isc.type from iam_scope isc where isc.public_id = new.scope_id into scope_type;
if scope_type = 'project' then
return new;
end if;
raise exception 'invalid target scope type % (must be project)', scope_type;
end;
$$ language plpgsql;
-- target_host_set_scope_valid() is a before insert trigger function for
-- target_host_set. It verifies the host set being attached belongs to a
-- catalog in the same scope as the target.
create or replace function
target_host_set_scope_valid()
returns trigger
as $$
begin
perform from
host_catalog hc,
host_set hs,
target t
where
-- bug fix: the original predicate never referenced new.host_set_id (and
-- carried an unconstrained join to iam_scope), so the check passed whenever
-- ANY host set existed in the target's scope. Constrain the lookup to the
-- host set actually being added.
hs.public_id = new.host_set_id and
hc.public_id = hs.catalog_id and
hc.scope_id = t.scope_id and
t.public_id = new.target_id;
if not found then
raise exception 'target scope and host set scope are not equal';
end if;
return new;
end;
$$ language plpgsql;
commit;
`),
},
"migrations/41_targets.down.sql": {
name: "41_targets.down.sql",
bytes: []byte(`
begin;
-- Reverse 41_targets.up.sql.
-- bug fix: the original dropped the tables first, so the plain
-- "drop table target_tcp" failed on the dependent target_all_subtypes view,
-- and it dropped a view named "target_host_set_catalog" which this migration
-- pair never creates -- the view created by 41_targets.up.sql is
-- "target_set". Drop views before the tables they select from.
drop view target_set;
drop view target_all_subtypes;
drop table target_host_set cascade;
drop table target_tcp cascade;
drop table target cascade;
delete
from oplog_ticket
where name in (
'target_tcp'
);
commit;
`),
},
"migrations/41_targets.up.sql": {
name: "41_targets.up.sql",
bytes: []byte(`
/*
┌─────────────────┐
┌─────────────────┐ │ target_tcp │
│ target │ ├─────────────────┤
├─────────────────┤ │public_id │
│public_id │┼─────────────○┼│scope_id │
│scope_id │ │default_port │
│ │ │name (not null) │
└─────────────────┘ │description │
┼ └─────────────────┘
╱│╲
┌─────────────────┐
│ target_host_set │
├─────────────────┤
│target_id │
│host_set_id │
│ │
└─────────────────┘
╲│╱
┌─────────────────┐
│ host_set │
├─────────────────┤
│public_id │
│catalog_id │
│ │
└─────────────────┘
*/
begin;
-- target: the abstract base type for target subtypes (e.g. target_tcp).
-- Rows are created/removed by the insert/delete_target_subtype triggers on
-- the subtype tables, not directly.
create table target (
public_id wt_public_id primary key,
scope_id wt_scope_id not null
references iam_scope(public_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
create trigger
immutable_columns
before
update on target
for each row execute procedure immutable_columns('public_id', 'scope_id', 'create_time');
create trigger
default_create_time_column
before
insert on target
for each row execute procedure default_create_time();
-- targets may only live in project scopes
create trigger
target_scope_valid
before insert on target
for each row execute procedure target_scope_valid();
-- target_host_set: join table associating a target with the host sets its
-- sessions may be routed through.
create table target_host_set(
target_id wt_public_id
references target(public_id)
on delete cascade
on update cascade,
host_set_id wt_public_id
references host_set(public_id)
on delete cascade
on update cascade,
primary key(target_id, host_set_id),
create_time wt_timestamp
);
create trigger
immutable_columns
before
update on target_host_set
for each row execute procedure immutable_columns('target_id', 'host_set_id', 'create_time');
-- reject host sets whose catalog is in a different scope than the target
create trigger
target_host_set_scope_valid
before
insert on target_host_set
for each row execute procedure target_host_set_scope_valid();
-- target_tcp: the tcp subtype of target; shares its public_id with the
-- base-type row, which is maintained by the subtype triggers below.
create table target_tcp (
public_id wt_public_id primary key
references target(public_id)
on delete cascade
on update cascade,
scope_id wt_scope_id not null
references iam_scope(public_id)
on delete cascade
on update cascade,
name text not null, -- name is not optional for a target subtype
description text,
default_port int, -- default_port can be null
-- max duration of the session in seconds.
-- default is 8 hours
session_max_seconds int not null default 28800
constraint session_max_seconds_must_be_greater_than_0
check(session_max_seconds > 0),
-- limit on number of session connections allowed. -1 equals no limit
session_connection_limit int not null default 1
constraint session_connection_limit_must_be_greater_than_0_or_negative_1
check(session_connection_limit > 0 or session_connection_limit = -1),
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
unique(scope_id, name) -- name must be unique within a scope
);
-- keep the base-type target table in sync with this subtype table
create trigger
insert_target_subtype
before insert on target_tcp
for each row execute procedure insert_target_subtype();
create trigger
delete_target_subtype
after delete on target_tcp
for each row execute procedure delete_target_subtype();
-- define the immutable fields for target_tcp
create trigger
immutable_columns
before
update on target_tcp
for each row execute procedure immutable_columns('public_id', 'scope_id', 'create_time');
create trigger
update_version_column
after update on target_tcp
for each row execute procedure update_version_column();
create trigger
update_time_column
before update on target_tcp
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on target_tcp
for each row execute procedure default_create_time();
create trigger
target_scope_valid
before insert on target_tcp
for each row execute procedure target_scope_valid();
-- target_all_subtypes is a union of all target subtypes
-- (currently only target_tcp), tagged with a type discriminator column.
create view target_all_subtypes
as
select
public_id,
scope_id,
name,
description,
default_port,
session_max_seconds,
session_connection_limit,
version,
create_time,
update_time,
'tcp' as type
from target_tcp;
-- target_set joins each target to its associated host sets (with the host
-- set's catalog), flattening the target_host_set association.
create view target_set
as
select
hs.public_id,
hs.catalog_id,
ths.target_id
from
target_host_set ths,
host_set hs
where
hs.public_id = ths.host_set_id;
-- register target_tcp with the oplog ticketing system
insert into oplog_ticket
(name, version)
values
('target_tcp', 1);
commit;
`),
},
"migrations/50_session.down.sql": {
name: "50_session.down.sql",
bytes: []byte(`
begin;
-- Reverse 50_session.up.sql.
-- bug fixes: the original listed "drop function insert_session_state" twice
-- (the second drop fails once the function is gone), never dropped the
-- session_with_state view (which blocks the plain table drops, since the
-- view depends on session and session_state), and never dropped
-- cancel_session or cancel_session_with_null_fk.
drop view session_with_state;
drop table session_state;
drop table session_state_enm;
drop table session;
drop table session_termination_reason_enm;
drop function insert_session_state;
drop function insert_new_session_state;
drop function insert_session;
drop function update_session_state_on_termination_reason;
drop function cancel_session_with_null_fk;
drop function cancel_session;
delete
from oplog_ticket
where name in (
'session'
);
commit;
`),
},
"migrations/50_session.up.sql": {
name: "50_session.up.sql",
bytes: []byte(`
begin;
/*
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ iam_scope_org │ │ iam_user │ │ auth_token │
├─────────────────┤ ├─────────────────┤ ├─────────────────┤
│ public_id (pk) │ │ public_id (pk) │ │ public_id (pk) │
│ │ │ │ │ │
└─────────────────┘ └─────────────────┘ └─────────────────┘
▲fk7 ┼ ▲fk1 ┼ ▲fk6 ┼
┼ ┼ ┼
├─────────────────────────────────┴─────────────────────┘
╱│╲
┌──────────────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ session │╲ fk3▶ │ server │ │ target │
├──────────────────────────┤─○──────○┼├─────────────────┤ ├─────────────────┤
│ public_id (pk) │╱ │ private_id (pk) │ │ public_id (pk) │
│ user_id (fk1) │ │ type (pk) │ │ │
│ host_id (fk2) │ └─────────────────┘ └─────────────────┘
│ server_id (fk3) │ ▲fk4 ┼
│ server_type (fk3) │╲ ┼
│ target_id (fk4) │─○─────────────────┬─────────────────────┤
│ host_set_id (fk5) │╱ ┼ ┼
│ auth_token_id (fk6) │ ▼fk5 ┼ ▼fk2 ┼
│ scope_id (fk7) │ ┌─────────────────┐ ┌─────────────────┐
│ termination_reason (fk8) │ │ host_set │ │ host │
└──────────────────────────┘ ├─────────────────┤ ├─────────────────┤
▲fk1 ┼ ╲│╱ │ public_id (pk) │ │ public_id (pk) │
┼ ○ │ │ │ │
│ │ └─────────────────┘ └─────────────────┘
│ │
└─┐ │
│ │ ┌───────────────────────────────┐
│ │ │session_termination_reason_enm │
│ │ fk8▶ ├───────────────────────────────┤
┼ └──────────○┼│ name │
╱│╲ └───────────────────────────────┘
┌──────────────────────────────────────────┐
│ session_state │
├──────────────────────────────────────────┤┼○┐
│ session_id (pk,fk1,fk2,unq1,unq2) │ │◀fk2
│ state (fk3) │ │
│ previous_end_time (fk2,unq1) │┼○┘
│ start_time (pk) │
│ end_time (unq2) │
└──────────────────────────────────────────┘
╲│╱
▼fk3 ┼
┌───────────────────────────────┐
│ session_state_enm │
├───────────────────────────────┤
│ name │
└───────────────────────────────┘
*/
-- session_termination_reason_enm: enumeration of the reasons a session can
-- be terminated; session.termination_reason references it.
create table session_termination_reason_enm (
name text primary key
constraint only_predefined_session_termination_reasons_allowed
check (
name in (
'unknown',
'timed out',
'closed by end-user',
'terminated',
'network error',
'system error',
'connection limit',
'canceled'
)
)
);
insert into session_termination_reason_enm (name)
values
('unknown'),
('timed out'),
('closed by end-user'),
('terminated'),
('network error'),
('system error'),
('connection limit'),
('canceled');
-- session: one authorized user-to-target session. Most fks are nullable with
-- "on delete set null" so a session row (and its history) survives deletion
-- of the referenced resources; insert_session() enforces that they are
-- non-null at creation time.
create table session (
public_id wt_public_id primary key,
-- the user of the session
user_id text -- fk1
-- not using the wt_user_id domain type because it is marked 'not null'
references iam_user (public_id)
on delete set null
on update cascade,
-- the host the user is connected to via this session
host_id wt_public_id -- fk2
references host (public_id)
on delete set null
on update cascade,
-- the worker proxying the connection between the user and the host
server_id text, -- fk3
server_type text,-- fk3
foreign key (server_id, server_type)
references server (private_id, type)
on delete set null
on update cascade,
-- the target the host was chosen from and the user was authorized to
-- connect to
target_id wt_public_id -- fk4
references target (public_id)
on delete set null
on update cascade,
-- the host set the host was chosen from and the user was authorized to
-- connect to via the target
host_set_id wt_public_id -- fk5
references host_set (public_id)
on delete set null
on update cascade,
-- the auth token of the user when this session was created
auth_token_id wt_public_id -- fk6
references auth_token (public_id)
on delete set null
on update cascade,
-- the project which owns this session
scope_id wt_scope_id -- fk7
references iam_scope_project (scope_id)
on delete set null
on update cascade,
-- Certificate to use when connecting (or if using custom certs, to
-- serve as the "login"). Raw DER bytes.
certificate bytea not null,
-- after this time the connection will be expired, e.g. forcefully terminated
expiration_time wt_timestamp, -- maybe null
-- limit on number of session connections allowed. -1 equals no limit
-- (the check constraint forbids 0; the default is 1)
connection_limit int not null default 1
constraint connection_limit_must_be_greater_than_0_or_negative_1
check(connection_limit > 0 or connection_limit = -1),
-- trust of first use token
tofu_token bytea, -- will be null when session is first created
-- TODO: Make key_id a foreign key once we have DEKs
key_id text, -- will be null on insert
-- references kms_database_key_version(private_id)
-- on delete restrict
-- on update cascade,
-- the reason this session ended (null until terminated)
termination_reason text -- fk8
references session_termination_reason_enm (name)
on delete restrict
on update cascade,
version wt_version,
create_time wt_timestamp,
update_time wt_timestamp,
endpoint text -- not part of the warehouse, used to send info to the worker
);
-- note: nullable fk columns are deliberately absent from the immutable list,
-- since their "on delete set null" cascades must be allowed to update them.
create trigger
immutable_columns
before
update on session
for each row execute procedure immutable_columns('public_id', 'certificate', 'expiration_time', 'connection_limit', 'create_time', 'endpoint');
-- session table has some cascades of FK to null, so we need to be careful
-- which columns trigger an update of the version column
create trigger
update_version_column
after update of version, termination_reason, key_id, tofu_token, server_id, server_type on session
for each row execute procedure update_version_column();
create trigger
update_time_column
before update on session
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on session
for each row execute procedure default_create_time();
-- insert_session() is a before insert trigger function for session. The fk
-- columns checked here are nullable in the schema only so later deletes can
-- cascade them to null; at creation time they are all required.
create or replace function
insert_session()
returns trigger
as $$
begin
case
when new.user_id is null then
raise exception 'user_id is null';
when new.host_id is null then
raise exception 'host_id is null';
when new.target_id is null then
raise exception 'target_id is null';
when new.host_set_id is null then
raise exception 'host_set_id is null';
when new.auth_token_id is null then
raise exception 'auth_token_id is null';
when new.scope_id is null then
raise exception 'scope_id is null';
when new.endpoint is null then
raise exception 'endpoint is null';
else
end case;
return new;
end;
$$ language plpgsql;
create trigger
insert_session
before insert on session
for each row execute procedure insert_session();
-- insert_new_session_state() is an after insert trigger function for session:
-- every new session starts in the 'pending' state.
create or replace function
insert_new_session_state()
returns trigger
as $$
begin
insert into session_state (session_id, state)
values
(new.public_id, 'pending');
return new;
end;
$$ language plpgsql;
create trigger
insert_new_session_state
after insert on session
for each row execute procedure insert_new_session_state();
-- update_session_state_on_termination_reason() is used in an after update
-- trigger on the session table. When termination_reason is set it validates
-- that all of the session's connections are closed, then inserts a
-- 'terminated' state into session_state (unless one already exists).
-- (the original header comment described a different function -- a
-- copy-paste from the session_connection trigger.)
create or replace function
update_session_state_on_termination_reason()
returns trigger
as $$
begin
if new.termination_reason is not null then
perform from
session
where
public_id = new.public_id and
public_id not in (
select session_id
from session_connection
where
public_id in (
select connection_id
from session_connection_state
where
state != 'closed' and
end_time is null
)
);
if not found then
-- bug fix: plpgsql raise uses bare '%' placeholders; the original '%s'
-- substituted the id and then printed a stray literal 's' after it.
raise 'session % has open connections', new.public_id;
end if;
-- check to see if there's a terminated state already, before inserting a
-- new one.
perform from
session_state ss
where
ss.session_id = new.public_id and
ss.state = 'terminated';
if found then
return new;
end if;
insert into session_state (session_id, state)
values
(new.public_id, 'terminated');
end if;
return new;
end;
$$ language plpgsql;
create trigger
update_session_state_on_termination_reason
after update of termination_reason on session
for each row execute procedure update_session_state_on_termination_reason();
-- cancel_session will insert a 'canceling' state for the session, if there
-- isn't a canceling state already. It's used by cancel_session_with_null_fk.
create or replace function
cancel_session(in sessionId text) returns void
as $$
declare
rows_affected numeric;
begin
-- insert at most one 'canceling' state: the select only yields a row when
-- the session exists and has no 'canceling' state yet.
insert into session_state(session_id, state)
select
sessionId::text, 'canceling'
from
session s
where
s.public_id = sessionId::text and
s.public_id not in (
select
session_id
from
session_state
where
session_id = sessionId::text and
state = 'canceling'
) limit 1;
get diagnostics rows_affected = row_count;
if rows_affected > 1 then
raise exception 'cancel session: more than one row affected: %', rows_affected;
end if;
end;
$$ language plpgsql;
-- cancel_session_with_null_fk is intended to be a before update trigger that
-- sets the session's state to cancel if a FK is set to null.
create or replace function
cancel_session_with_null_fk()
returns trigger
as $$
begin
case
when new.user_id is null then
perform cancel_session(new.public_id);
when new.host_id is null then
perform cancel_session(new.public_id);
when new.target_id is null then
perform cancel_session(new.public_id);
when new.host_set_id is null then
perform cancel_session(new.public_id);
when new.auth_token_id is null then
perform cancel_session(new.public_id);
when new.scope_id is null then
perform cancel_session(new.public_id);
-- bug fix: without an else branch plpgsql raises case_not_found whenever the
-- triggering update leaves every fk non-null (e.g. an id value cascading via
-- "on update cascade"). Mirrors the empty else used by insert_session().
else
end case;
return new;
end;
$$ language plpgsql;
create trigger
cancel_session_with_null_fk
before update of user_id, host_id, target_id, host_set_id, auth_token_id, scope_id on session
for each row execute procedure cancel_session_with_null_fk();
-- session_state_enm: enumeration of valid session lifecycle states; the
-- allowed transitions are sketched in the diagram below.
create table session_state_enm (
name text primary key
constraint only_predefined_session_states_allowed
check (
name in ('pending', 'active', 'canceling', 'terminated')
)
);
insert into session_state_enm (name)
values
('pending'),
('active'),
('canceling'),
('terminated');
/*
┌────────────────┐
start │ │
. │ Canceling │
(●) ┌────▶│ │─────┐
' │ │ │ │
│ │ └────────────────┘ │
│ │ │
▼ │ ▼
┌────────────────┐ ┌────────────────┐ ┌────────────────┐
│ │ │ │ │ │
│ Pending │ │ Active │ │ Terminated │
│ │──────────▶│ │──────────▶│ │
│ │ │ │ │ │
└────────────────┘ └────────────────┘ └────────────────┘
│ │
│ │
│ │
│ │
└──────────────────────▶ ◉ End ◀────────────────────────┘
*/
-- Design influenced by:
-- Joe Celko's SQL for Smarties: Advanced SQL Programming, 5th edition
-- Chapter 12, p270
-- session_state: temporal state history for a session. States form a chain
-- per session: the self-referencing fk ties each row's previous_end_time to
-- the prior row's end_time, and a null end_time marks the current state.
create table session_state (
session_id wt_public_id not null -- fk1, fk2
references session (public_id)
on delete cascade
on update cascade,
state text not null -- fk3
references session_state_enm(name)
on delete restrict
on update cascade,
previous_end_time timestamp with time zone, -- fk2 -- null means first state
start_time timestamp with time zone default current_timestamp not null,
constraint previous_end_time_and_start_time_in_sequence
check (previous_end_time <= start_time),
end_time timestamp with time zone, -- null means unfinished current state
constraint start_and_end_times_in_sequence
check (start_time <= end_time),
constraint end_times_in_sequence
check (previous_end_time <> end_time),
primary key (session_id, start_time),
unique (session_id, previous_end_time), -- null means first state
unique (session_id, end_time), -- one null current state
foreign key (session_id, previous_end_time) -- self-reference
references session_state (session_id, end_time)
);
create trigger
immutable_columns
before
update on session_state
for each row execute procedure immutable_columns('session_id', 'state', 'start_time', 'previous_end_time');
-- insert_session_state() is a before insert trigger function for
-- session_state. It closes the session's current state row (setting its
-- end_time) and links the new row into the chain; for a session's first
-- state the update matches nothing and previous_end_time stays null.
create or replace function
insert_session_state()
returns trigger
as $$
begin
update session_state
set end_time = now()
where session_id = new.session_id
and end_time is null;
if not found then
new.previous_end_time = null;
new.start_time = now();
new.end_time = null;
return new;
end if;
new.previous_end_time = now();
new.start_time = now();
new.end_time = null;
return new;
end;
$$ language plpgsql;
create trigger insert_session_state before insert on session_state
for each row execute procedure insert_session_state();
-- session_with_state joins each session to its state history rows; a session
-- appears once per session_state row.
create view session_with_state as
select
s.public_id,
s.user_id,
s.host_id,
s.server_id,
s.server_type,
s.target_id,
s.host_set_id,
s.auth_token_id,
s.scope_id,
s.certificate,
s.expiration_time,
s.connection_limit,
s.tofu_token,
s.key_id,
s.termination_reason,
s.version,
s.create_time,
s.update_time,
s.endpoint,
ss.state,
ss.previous_end_time,
ss.start_time,
ss.end_time
from
session s,
session_state ss
where
s.public_id = ss.session_id;
commit;
`),
},
"migrations/51_connection.down.sql": {
name: "51_connection.down.sql",
bytes: []byte(`
begin;
-- Reverse 51_connection.up.sql: state rows and their enum first, then the
-- connection table and its enum, then the trigger functions.
-- NOTE(review): 51_connection.up.sql is not fully visible here -- verify this
-- drops everything that migration creates (views, other functions, tickets).
drop table session_connection_state;
drop table session_connection_state_enm;
drop table session_connection;
drop table session_connection_closed_reason_enm;
drop function insert_session_connection_state;
drop function insert_new_connection_state;
drop function update_connection_state_on_closed_reason;
commit;
`),
},
"migrations/51_connection.up.sql": {
name: "51_connection.up.sql",
bytes: []byte(`
begin;
/*
┌────────────────┐
│ session │
├────────────────┤
│ public_id (pk) │
│ │
│ │
└────────────────┘
▲fk1 ┼
╱│╲ ┌──────────────────────────────────────┐
┌───────────────────────┐ │ session_connection_closed_reason_enm │
│ session_connection │╲ fk2▶ ├──────────────────────────────────────┤
├───────────────────────┤─○───○┼│ name │
│ public_id (pk) │╱ └──────────────────────────────────────┘
│ session_id (fk1) │
│ closed_reason (fk2) │
└───────────────────────┘
▲fk1 ┼
╱│╲
┌──────────────────────────────────────────┐
│ session_connection_state │
├──────────────────────────────────────────┤┼○┐
│ connection_id (pk,fk1,fk2,unq1,unq2) │ │◀fk2
│ state (fk3) │ │
│ previous_end_time (fk2,unq1) │┼○┘
│ start_time (pk) │
│ end_time (unq2) │
└──────────────────────────────────────────┘
╲│╱
▼fk3 ┼
┌───────────────────────────────┐
│ session_connection_state_enm │
├───────────────────────────────┤
│ name │
└───────────────────────────────┘
*/
create table session_connection_closed_reason_enm (
name text primary key
constraint only_predefined_session_connection_closed_reasons_allowed
check (
name in (
'unknown',
'timed out',
'closed by end-user',
'canceled',
'network error',
'system error'
)
)
);
insert into session_connection_closed_reason_enm (name)
values
('unknown'),
('timed out'),
('closed by end-user'),
('canceled'),
('network error'),
('system error');
-- A session connection is one connection proxied by a worker from a client to
-- an endpoint for a session. The client initiates the connection to the worker
-- and the worker initiates the connection to the endpoint.
-- A session can have zero or more session connections.
create table session_connection (
public_id wt_public_id primary key,
session_id wt_public_id not null
references session (public_id)
on delete cascade
on update cascade,
-- the client_tcp_address is the network address of the client which initiated
-- the connection to a worker
client_tcp_address inet, -- maybe null on insert
-- the client_tcp_port is the network port at the address of the client the
-- worker proxied a connection for the user
client_tcp_port integer -- maybe null on insert
constraint client_tcp_port_must_be_greater_than_0
check(client_tcp_port > 0)
constraint client_tcp_port_must_less_than_or_equal_to_65535
check(client_tcp_port <= 65535),
-- the endpoint_tcp_address is the network address of the endpoint which the
-- worker initiated the connection to, for the user
endpoint_tcp_address inet, -- maybe be null on insert
-- the endpoint_tcp_port is the network port at the address of the endpoint the
-- worker proxied a connection to, for the user
endpoint_tcp_port integer -- maybe null on insert
constraint endpoint_tcp_port_must_be_greater_than_0
check(endpoint_tcp_port > 0)
constraint endpoint_tcp_port_must_less_than_or_equal_to_65535
check(endpoint_tcp_port <= 65535),
-- the total number of bytes received by the worker from the client and sent
-- to the endpoint for this connection
bytes_up bigint -- can be null
constraint bytes_up_must_be_null_or_a_non_negative_number
check (
bytes_up is null
or
bytes_up >= 0
),
-- the total number of bytes received by the worker from the endpoint and sent
-- to the client for this connection
bytes_down bigint -- can be null
constraint bytes_down_must_be_null_or_a_non_negative_number
check (
bytes_down is null
or
bytes_down >= 0
),
closed_reason text
references session_connection_closed_reason_enm (name)
on delete restrict
on update cascade,
version wt_version,
create_time wt_timestamp,
update_time wt_timestamp
);
create trigger
immutable_columns
before
update on session_connection
for each row execute procedure immutable_columns('public_id', 'session_id', 'create_time');
create trigger
update_version_column
after update on session_connection
for each row execute procedure update_version_column();
create trigger
update_time_column
before update on session_connection
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on session_connection
for each row execute procedure default_create_time();
-- insert_new_connection_state() is used in an after insert trigger on the
-- session_connection table. it will insert a state of "authorized" in
-- session_connection_state for the new session connection.
create or replace function
insert_new_connection_state()
returns trigger
as $$
begin
insert into session_connection_state (connection_id, state)
values
(new.public_id, 'authorized');
return new;
end;
$$ language plpgsql;
create trigger
insert_new_connection_state
after insert on session_connection
for each row execute procedure insert_new_connection_state();
-- update_connection_state_on_closed_reason() is used in an update trigger on the
-- session_connection table. it will insert a state of "closed" in
-- session_connection_state for the closed session connection.
create or replace function
update_connection_state_on_closed_reason()
returns trigger
as $$
begin
if new.closed_reason is not null then
-- check to see if there's a closed state already, before inserting a
-- new one.
perform from
session_connection_state cs
where
cs.connection_id = new.public_id and
cs.state = 'closed';
if not found then
insert into session_connection_state (connection_id, state)
values
(new.public_id, 'closed');
end if;
-- whenever we close a connection, we want to terminate the session if
-- possible.
perform terminate_session_if_possible(new.session_id);
end if;
return new;
end;
$$ language plpgsql;
create trigger
update_connection_state_on_closed_reason
after update of closed_reason on session_connection
for each row execute procedure update_connection_state_on_closed_reason();
create table session_connection_state_enm (
name text primary key
constraint only_predefined_session_connection_states_allowed
check (
name in ('authorized', 'connected', 'closed')
)
);
insert into session_connection_state_enm (name)
values
('authorized'),
('connected'),
('closed');
create table session_connection_state (
connection_id wt_public_id not null
references session_connection (public_id)
on delete cascade
on update cascade,
state text not null
references session_connection_state_enm(name)
on delete restrict
on update cascade,
previous_end_time timestamp with time zone, -- fk2 -- null means first state
start_time timestamp with time zone default current_timestamp not null,
constraint previous_end_time_and_start_time_in_sequence
check (previous_end_time <= start_time),
end_time timestamp with time zone, -- null means unfinished current state
constraint start_and_end_times_in_sequence
check (start_time <= end_time),
constraint end_times_in_sequence
check (previous_end_time <> end_time),
primary key (connection_id, start_time),
unique (connection_id, previous_end_time), -- null means first state
unique (connection_id, end_time), -- one null current state
foreign key (connection_id, previous_end_time) -- self-reference
references session_connection_state (connection_id, end_time)
);
create trigger
immutable_columns
before
update on session_connection_state
for each row execute procedure immutable_columns('connection_id', 'state', 'start_time', 'previous_end_time');
create or replace function
insert_session_connection_state()
returns trigger
as $$
begin
update session_connection_state
set end_time = now()
where connection_id = new.connection_id
and end_time is null;
if not found then
new.previous_end_time = null;
new.start_time = now();
new.end_time = null;
return new;
end if;
new.previous_end_time = now();
new.start_time = now();
new.end_time = null;
return new;
end;
$$ language plpgsql;
create trigger insert_session_connection_state before insert on session_connection_state
for each row execute procedure insert_session_connection_state();
-- terminate_session_if_possible takes a session id and terminates the session
-- if the following conditions are met:
-- * the session is expired and all its connections are closed.
-- * the session is canceling and all its connections are closed
-- * the session has exhausted its connection limit and all its connections
-- are closed.
--
-- Note: this function should align closely with the domain function
-- TerminateCompletedSessions
create or replace function
terminate_session_if_possible(terminate_session_id text)
returns void
as $$
begin
-- is terminate_session_id in a canceling state
with canceling_session(session_id) as
(
select
session_id
from
session_state ss
where
ss.session_id = terminate_session_id and
ss.state = 'canceling' and
ss.end_time is null
)
update session us
set termination_reason =
case
-- timed out sessions
when now() > us.expiration_time then 'timed out'
-- canceling sessions
when us.public_id in(
select
session_id
from
canceling_session cs
where
us.public_id = cs.session_id
) then 'canceled'
-- default: session connection limit reached.
else 'connection limit'
end
where
-- limit update to just the terminating_session_id
us.public_id = terminate_session_id and
termination_reason is null and
-- session expired or connection limit reached
(
-- expired sessions...
now() > us.expiration_time or
-- connection limit reached...
(
-- handle unlimited connections...
connection_limit != -1 and
(
select count (*)
from session_connection sc
where
sc.session_id = us.public_id
) >= connection_limit
) or
-- canceled sessions
us.public_id in (
select
session_id
from
canceling_session cs
where
us.public_id = cs.session_id
)
) and
-- make sure there are no existing connections
us.public_id not in (
select
session_id
from
session_connection
where public_id in (
select
connection_id
from
session_connection_state
where
state != 'closed' and
end_time is null
)
);
end;
$$ language plpgsql;
commit;
`),
},
// Down migration 60: drops the warehouse helper functions
// (wh_current_time_id, wh_current_date_id, wh_time_id, wh_date_id, wh_dim_id)
// and warehouse domains (wh_dim_text, wh_timestamp, wh_public_id, wh_dim_id,
// wh_bytes_transmitted, wh_inet_port).
"migrations/60_wh_domain_types.down.sql": {
	name: "60_wh_domain_types.down.sql",
	bytes: []byte(`
begin;
drop function wh_current_time_id;
drop function wh_current_date_id;
drop function wh_time_id;
drop function wh_date_id;
drop domain wh_dim_text;
drop domain wh_timestamp;
drop domain wh_public_id;
drop domain wh_dim_id;
drop function wh_dim_id;
drop domain wh_bytes_transmitted;
drop domain wh_inet_port;
commit;
`),
},
// Up migration 60: enables the pgcrypto extension and defines the warehouse
// domains (wh_inet_port, wh_bytes_transmitted, wh_dim_id, wh_public_id,
// wh_timestamp, wh_dim_text) along with the wh_dim_id() random-id generator
// and the wh_date_id/wh_time_id (and current_* variants) helpers that map a
// timestamp onto date/time dimension ids.
"migrations/60_wh_domain_types.up.sql": {
	name: "60_wh_domain_types.up.sql",
	bytes: []byte(`
begin;
create extension if not exists "pgcrypto";
create domain wh_inet_port as integer
check(
value > 0
and
value <= 65535
);
comment on domain wh_inet_port is
'An ordinal number between 1 and 65535 representing a network port';
create domain wh_bytes_transmitted as bigint
check(
value >= 0
);
comment on domain wh_bytes_transmitted is
'A non-negative integer representing the number of bytes transmitted';
-- wh_dim_id generates and returns a random ID which should be considered as
-- universally unique.
create or replace function wh_dim_id()
returns text
as $$
select encode(digest(gen_random_bytes(16), 'sha256'), 'base64');
$$ language sql;
create domain wh_dim_id as text
check(
length(trim(value)) > 0
);
comment on domain wh_dim_id is
'Random ID generated with pgcrypto';
create domain wh_public_id as text
check(
value = 'None'
or
length(trim(value)) > 10
);
comment on domain wh_public_id is
'Equivalent to wt_public_id but also allows the value to be ''None''';
create domain wh_timestamp as timestamp with time zone not null;
comment on domain wh_timestamp is
'Timestamp used in warehouse tables';
create domain wh_dim_text as text not null
check(
length(trim(value)) > 0
);
comment on domain wh_dim_text is
'Text fields in dimension tables are always not null and always not empty strings';
-- wh_date_id returns the wh_date_dimension id for ts.
create or replace function wh_date_id(ts wh_timestamp)
returns integer
as $$
select to_char(ts, 'YYYYMMDD')::integer;
$$ language sql;
-- wh_time_id returns the wh_time_of_day_dimension id for ts.
create or replace function wh_time_id(ts wh_timestamp)
returns integer
as $$
select to_char(ts, 'SSSS')::integer;
$$ language sql;
-- wh_date_id returns the wh_date_dimension id for current_timestamp.
create or replace function wh_current_date_id()
returns integer
as $$
select wh_date_id(current_timestamp);
$$ language sql;
-- wh_time_id returns the wh_time_of_day_dimension id for current_timestamp.
create or replace function wh_current_time_id()
returns integer
as $$
select wh_time_id(current_timestamp);
$$ language sql;
commit;
`),
},
// Down migration 62: drops the wh_time_of_day_dimension and wh_date_dimension
// warehouse tables.
"migrations/62_wh_datetime.down.sql": {
	name: "62_wh_datetime.down.sql",
	bytes: []byte(`
begin;
drop table wh_time_of_day_dimension;
drop table wh_date_dimension;
commit;
`),
},
// Up migration 62: creates and populates wh_date_dimension (one row per day
// for 50 years starting 2019-10-09, keyed YYYYMMDD) and
// wh_time_of_day_dimension (one row per second of a UTC day, keyed SSSS),
// each seeded with a sentinel id -1 / 'None' row for unknown values.
"migrations/62_wh_datetime.up.sql": {
	name: "62_wh_datetime.up.sql",
	bytes: []byte(`
begin;
create table wh_date_dimension (
id integer primary key,
date date not null,
calendar_quarter wh_dim_text,
calendar_month wh_dim_text,
calendar_year smallint not null,
day_of_week wh_dim_text,
day_of_week_number smallint not null,
day_of_week_number_iso smallint not null,
day_of_week_number_zero_based smallint not null,
day_number_in_calendar_month smallint not null,
day_number_in_calendar_year smallint not null,
weekday_indicator wh_dim_text
);
insert into wh_date_dimension (
id, date,
calendar_quarter, calendar_month, calendar_year,
day_of_week, day_of_week_number, day_of_week_number_iso, day_of_week_number_zero_based,
day_number_in_calendar_month, day_number_in_calendar_year,
weekday_indicator
) values (
-1, 'infinity',
'None', 'None', -1,
'None', -1, -1, -1,
-1, -1,
'None'
);
insert
into wh_date_dimension
select to_char(t.day, 'YYYYMMDD')::integer as id,
t.day::date as date,
'Q' || to_char(t.day, 'Q') as calendar_quarter,
to_char(t.day, 'Month') as calendar_month,
extract(year from t.day) as calendar_year,
to_char(t.day, 'Day') as day_of_week,
to_char(t.day, 'D')::int as day_of_week_number,
extract(isodow from t.day) as day_of_week_number_iso,
extract(dow from t.day) as day_of_week_number_zero_based,
extract(day from t.day) as day_number_in_calendar_month,
extract(doy from t.day) as day_number_in_calendar_year,
case extract(isodow from t.day)
when 6 then 'Weekend'
when 7 then 'Weekend'
else 'Weekday'
end as weekday_indicator
from generate_series(
date_trunc('day', timestamp '2019-10-09'),
date_trunc('day', timestamp '2019-10-09' + interval '50 years'),
interval '1 day'
) as t(day);
create table wh_time_of_day_dimension (
id integer primary key,
time_no_zone time not null,
time_at_utc timetz not null,
hour_of_day smallint not null,
minute_of_hour smallint not null,
second_of_minute smallint not null,
display_time_24 wh_dim_text,
display_time_12 wh_dim_text,
meridiem_indicator wh_dim_text
);
set timezone = 'UTC';
insert into wh_time_of_day_dimension (
id, time_no_zone, time_at_utc,
hour_of_day, minute_of_hour, second_of_minute,
display_time_24, display_time_12, meridiem_indicator
) values (
-1, 'allballs', 'allballs',
-1, -1, -1,
'None', 'None', 'None'
);
insert
into wh_time_of_day_dimension
select to_char(t.second, 'SSSS')::integer as id,
t.second::time as time_no_zone,
t.second::time as time_at_utc,
extract(hour from t.second) as hour_of_day,
extract(minute from t.second) as minute_of_hour,
extract(second from t.second) as second_of_minute,
to_char(t.second, 'HH24:MI:SS') as display_time_24,
to_char(t.second, 'HH12:MI:SS AM') as display_time_12,
to_char(t.second, 'PM') as meridiem_indicator
from generate_series(
date_trunc('day', current_timestamp),
date_trunc('day', current_timestamp) + interval '24 hours' - interval '1 second',
interval '1 second'
) as t(second);
commit;
`),
},
// Down migration 65: drops the whx_user_dimension_* and whx_host_dimension_*
// source/target views and the wh_user_dimension and wh_host_dimension tables.
"migrations/65_wh_session_dimensions.down.sql": {
	name: "65_wh_session_dimensions.down.sql",
	bytes: []byte(`
begin;
drop view whx_user_dimension_target;
drop view whx_user_dimension_source;
drop view whx_host_dimension_target;
drop view whx_host_dimension_source;
drop table wh_user_dimension;
drop table wh_host_dimension;
commit;
`),
},
// Up migration 65: creates the wh_host_dimension and wh_user_dimension
// tables, which version rows via current_row_indicator plus
// row_effective_time/row_expiration_time, and the paired whx_*_source
// (current operational values) and whx_*_target (rows marked 'Current')
// views used by triggers to detect dimension changes.
"migrations/65_wh_session_dimensions.up.sql": {
	name: "65_wh_session_dimensions.up.sql",
	bytes: []byte(`
begin;
create table wh_host_dimension (
-- random id generated using encode(digest(gen_random_bytes(16), 'sha256'), 'base64')
-- this is done to prevent conflicts with rows in other clusters
-- which enables warehouse data from multiple clusters to be loaded into a
-- single database instance
id wh_dim_id primary key default wh_dim_id(),
host_id wh_public_id not null,
host_type wh_dim_text,
host_name wh_dim_text,
host_description wh_dim_text,
host_address wh_dim_text,
host_set_id wh_public_id not null,
host_set_type wh_dim_text,
host_set_name wh_dim_text,
host_set_description wh_dim_text,
host_catalog_id wh_public_id not null,
host_catalog_type wh_dim_text,
host_catalog_name wh_dim_text,
host_catalog_description wh_dim_text,
target_id wh_public_id not null,
target_type wh_dim_text,
target_name wh_dim_text,
target_description wh_dim_text,
target_default_port_number integer not null,
target_session_max_seconds integer not null,
target_session_connection_limit integer not null,
project_id wt_scope_id not null,
project_name wh_dim_text,
project_description wh_dim_text,
host_organization_id wt_scope_id not null,
host_organization_name wh_dim_text,
host_organization_description wh_dim_text,
current_row_indicator wh_dim_text,
row_effective_time wh_timestamp,
row_expiration_time wh_timestamp
);
-- https://www.postgresql.org/docs/current/indexes-partial.html
create unique index wh_host_dim_current_constraint
on wh_host_dimension (target_id, host_set_id, host_id)
where current_row_indicator = 'Current';
-- The whx_host_dimension_source and whx_host_dimension_target views are used
-- by an insert trigger to determine if the current row for the dimension has
-- changed and new one needs to be inserted. The first column in the target
-- view must be the current warehouse id and all remaining columns must match
-- the columns in the source view.
-- The whx_host_dimension_source view shows the current values in the
-- operational tables of the host dimension.
create view whx_host_dimension_source as
select -- id is the first column in the target view
h.public_id as host_id,
'static host' as host_type,
coalesce(h.name, 'None') as host_name,
coalesce(h.description, 'None') as host_description,
coalesce(h.address, 'Unknown') as host_address,
s.public_id as host_set_id,
'static host set' as host_set_type,
coalesce(s.name, 'None') as host_set_name,
coalesce(s.description, 'None') as host_set_description,
c.public_id as host_catalog_id,
'static host catalog' as host_catalog_type,
coalesce(c.name, 'None') as host_catalog_name,
coalesce(c.description, 'None') as host_catalog_description,
t.public_id as target_id,
'tcp target' as target_type,
coalesce(t.name, 'None') as target_name,
coalesce(t.description, 'None') as target_description,
coalesce(t.default_port, 0) as target_default_port_number,
t.session_max_seconds as target_session_max_seconds,
t.session_connection_limit as target_session_connection_limit,
p.public_id as project_id,
coalesce(p.name, 'None') as project_name,
coalesce(p.description, 'None') as project_description,
o.public_id as host_organization_id,
coalesce(o.name, 'None') as host_organization_name,
coalesce(o.description, 'None') as host_organization_description
from static_host as h,
static_host_catalog as c,
static_host_set_member as m,
static_host_set as s,
target_host_set as ts,
target_tcp as t,
iam_scope as p,
iam_scope as o
where h.catalog_id = c.public_id
and h.public_id = m.host_id
and s.public_id = m.set_id
and t.public_id = ts.target_id
and s.public_id = ts.host_set_id
and p.public_id = t.scope_id
and p.type = 'project'
and o.public_id = p.parent_id
and o.type = 'org'
;
-- The whx_host_dimension_target view shows the rows in the wh_host_dimension
-- table marked as 'Current'.
create view whx_host_dimension_target as
select id,
host_id,
host_type,
host_name,
host_description,
host_address,
host_set_id,
host_set_type,
host_set_name,
host_set_description,
host_catalog_id,
host_catalog_type,
host_catalog_name,
host_catalog_description,
target_id,
target_type,
target_name,
target_description,
target_default_port_number,
target_session_max_seconds,
target_session_connection_limit,
project_id,
project_name,
project_description,
host_organization_id,
host_organization_name,
host_organization_description
from wh_host_dimension
where current_row_indicator = 'Current'
;
create table wh_user_dimension (
-- random id generated using encode(digest(gen_random_bytes(16), 'sha256'), 'base64')
-- this is done to prevent conflicts with rows in other clusters
-- which enables warehouse data from multiple clusters to be loaded into a
-- single database instance
id wh_dim_id primary key default wh_dim_id(),
user_id wt_user_id not null,
user_name wh_dim_text,
user_description wh_dim_text,
auth_account_id wh_public_id not null,
auth_account_type wh_dim_text,
auth_account_name wh_dim_text,
auth_account_description wh_dim_text,
auth_method_id wh_public_id not null,
auth_method_type wh_dim_text,
auth_method_name wh_dim_text,
auth_method_description wh_dim_text,
user_organization_id wt_scope_id not null,
user_organization_name wh_dim_text,
user_organization_description wh_dim_text,
current_row_indicator wh_dim_text,
row_effective_time wh_timestamp,
row_expiration_time wh_timestamp
);
-- The whx_user_dimension_source and whx_user_dimension_target views are used
-- by an insert trigger to determine if the current row for the dimension has
-- changed and new one needs to be inserted. The first column in the target
-- view must be the current warehouse id and all remaining columns must match
-- the columns in the source view.
-- The whx_user_dimension_source view shows the current values in the
-- operational tables of the user dimension.
create view whx_user_dimension_source as
select -- id is the first column in the target view
u.public_id as user_id,
coalesce(u.name, 'None') as user_name,
coalesce(u.description, 'None') as user_description,
coalesce(aa.public_id, 'None') as auth_account_id,
case when aa.public_id is null then 'None'
else 'password auth account'
end as auth_account_type,
coalesce(apa.name, 'None') as auth_account_name,
coalesce(apa.description, 'None') as auth_account_description,
coalesce(am.public_id, 'None') as auth_method_id,
case when am.public_id is null then 'None'
else 'password auth method'
end as auth_method_type,
coalesce(apm.name, 'None') as auth_method_name,
coalesce(apm.description, 'None') as auth_method_description,
org.public_id as user_organization_id,
coalesce(org.name, 'None') as user_organization_name,
coalesce(org.description, 'None') as user_organization_description
from iam_user as u
left join auth_account as aa on u.public_id = aa.iam_user_id
left join auth_method as am on aa.auth_method_id = am.public_id
left join auth_password_account as apa on aa.public_id = apa.public_id
left join auth_password_method as apm on am.public_id = apm.public_id
join iam_scope as org on u.scope_id = org.public_id
;
-- The whx_user_dimension_target view shows the rows in the wh_user_dimension
-- table marked as 'Current'.
create view whx_user_dimension_target as
select id,
user_id,
user_name,
user_description,
auth_account_id,
auth_account_type,
auth_account_name,
auth_account_description,
auth_method_id,
auth_method_type,
auth_method_name,
auth_method_description,
user_organization_id,
user_organization_name,
user_organization_description
from wh_user_dimension
where current_row_indicator = 'Current'
;
commit;
`),
},
// Down migration 66: drops the wh_upsert_user and wh_upsert_host functions.
"migrations/66_wh_session_dimensions.down.sql": {
	name: "66_wh_session_dimensions.down.sql",
	bytes: []byte(`
begin;
drop function wh_upsert_user;
drop function wh_upsert_host;
commit;
`),
},
// Up migration 66: creates wh_upsert_host(host, host_set, target) and
// wh_upsert_user(user, auth_token). Each compares the 'Current' warehouse
// dimension row against the operational source view; when they differ it
// marks the current row 'Expired' and inserts a fresh 'Current' row,
// returning the (possibly new) wh_dim_id.
"migrations/66_wh_session_dimensions.up.sql": {
	name: "66_wh_session_dimensions.up.sql",
	bytes: []byte(`
begin;
-- wh_upsert_host returns the wh_host_dimension id for p_host_id,
-- p_host_set_id, and p_target_id. wh_upsert_host compares the current values
-- in the wh_host_dimension with the current values in the operational tables
-- for the provided parameters. If the values between the operational tables
-- and the wh_host_dimension differ, a new row is inserted in the
-- wh_host_dimension to match the current values in the operational tables and
-- the new id is returned. If the values do not differ, the current id is
-- returned.
create or replace function wh_upsert_host(p_host_id wt_public_id, p_host_set_id wt_public_id, p_target_id wt_public_id)
returns wh_dim_id
as $$
declare
src whx_host_dimension_target%rowtype;
target whx_host_dimension_target%rowtype;
new_row wh_host_dimension%rowtype;
begin
select * into target
from whx_host_dimension_target as t
where t.host_id = p_host_id
and t.host_set_id = p_host_set_id
and t.target_id = p_target_id;
select target.id, t.* into src
from whx_host_dimension_source as t
where t.host_id = p_host_id
and t.host_set_id = p_host_set_id
and t.target_id = p_target_id;
if src is distinct from target then
-- expire the current row
update wh_host_dimension
set current_row_indicator = 'Expired',
row_expiration_time = current_timestamp
where host_id = p_host_id
and host_set_id = p_host_set_id
and target_id = p_target_id
and current_row_indicator = 'Current';
-- insert a new row
insert into wh_host_dimension (
host_id, host_type, host_name, host_description, host_address,
host_set_id, host_set_type, host_set_name, host_set_description,
host_catalog_id, host_catalog_type, host_catalog_name, host_catalog_description,
target_id, target_type, target_name, target_description,
target_default_port_number, target_session_max_seconds, target_session_connection_limit,
project_id, project_name, project_description,
host_organization_id, host_organization_name, host_organization_description,
current_row_indicator, row_effective_time, row_expiration_time
)
select host_id, host_type, host_name, host_description, host_address,
host_set_id, host_set_type, host_set_name, host_set_description,
host_catalog_id, host_catalog_type, host_catalog_name, host_catalog_description,
target_id, target_type, target_name, target_description,
target_default_port_number, target_session_max_seconds, target_session_connection_limit,
project_id, project_name, project_description,
host_organization_id, host_organization_name, host_organization_description,
'Current', current_timestamp, 'infinity'::timestamptz
from whx_host_dimension_source
where host_id = p_host_id
and host_set_id = p_host_set_id
and target_id = p_target_id
returning * into new_row;
return new_row.id;
end if;
return target.id;
end;
$$ language plpgsql;
-- wh_upsert_user returns the wh_user_dimension id for p_user_id and
-- p_auth_token_id. wh_upsert_user compares the current values in the
-- wh_user_dimension with the current values in the operational tables for the
-- provided parameters. If the values between the operational tables and the
-- wh_user_dimension differ, a new row is inserted in the wh_user_dimension to
-- match the current values in the operational tables and the new id is
-- returned. If the values do not differ, the current id is returned.
create or replace function wh_upsert_user(p_user_id wt_user_id, p_auth_token_id wt_public_id)
returns wh_dim_id
as $$
declare
src whx_user_dimension_target%rowtype;
target whx_user_dimension_target%rowtype;
new_row wh_user_dimension%rowtype;
acct_id wt_public_id;
begin
select auth_account_id into strict acct_id
from auth_token
where public_id = p_auth_token_id;
select * into target
from whx_user_dimension_target as t
where t.user_id = p_user_id
and t.auth_account_id = acct_id;
select target.id, t.* into src
from whx_user_dimension_source as t
where t.user_id = p_user_id
and t.auth_account_id = acct_id;
if src is distinct from target then
-- expire the current row
update wh_user_dimension
set current_row_indicator = 'Expired',
row_expiration_time = current_timestamp
where user_id = p_user_id
and auth_account_id = acct_id
and current_row_indicator = 'Current';
-- insert a new row
insert into wh_user_dimension (
user_id, user_name, user_description,
auth_account_id, auth_account_type, auth_account_name, auth_account_description,
auth_method_id, auth_method_type, auth_method_name, auth_method_description,
user_organization_id, user_organization_name, user_organization_description,
current_row_indicator, row_effective_time, row_expiration_time
)
select user_id, user_name, user_description,
auth_account_id, auth_account_type, auth_account_name, auth_account_description,
auth_method_id, auth_method_type, auth_method_name, auth_method_description,
user_organization_id, user_organization_name, user_organization_description,
'Current', current_timestamp, 'infinity'::timestamptz
from whx_user_dimension_source
where user_id = p_user_id
and auth_account_id = acct_id
returning * into new_row;
return new_row.id;
end if;
return target.id;
end;
$$ language plpgsql;
commit;
`),
},
"migrations/68_wh_session_facts.down.sql": {
name: "68_wh_session_facts.down.sql",
bytes: []byte(`
begin;
drop table wh_session_connection_accumulating_fact;
drop table wh_session_accumulating_fact;
commit;
`),
},
"migrations/68_wh_session_facts.up.sql": {
name: "68_wh_session_facts.up.sql",
bytes: []byte(`
begin;
-- Column names for numeric fields that are not a measurement end in id or
-- number. This naming convention enables automatic field type detection in
-- certain data analysis tools.
-- https://help.tableau.com/current/pro/desktop/en-us/data_clean_adm.htm
-- The wh_session_accumulating_fact table is an accumulating snapshot.
-- The table wh_session_accumulating_fact is an accumulating fact table.
-- The grain of the fact table is one row per session.
create table wh_session_accumulating_fact (
session_id wt_public_id primary key,
-- auth token id is a degenerate dimension
auth_token_id wt_public_id not null,
-- foreign keys to the dimension tables
host_id wh_dim_id not null
references wh_host_dimension (id)
on delete restrict
on update cascade,
user_id wh_dim_id not null
references wh_user_dimension (id)
on delete restrict
on update cascade,
-- TODO(mgaffney) 09/2020: add dimension and foreign key for the session
-- termination reason
-- date and time foreign keys
session_pending_date_id integer not null
references wh_date_dimension (id)
on delete restrict
on update cascade,
session_pending_time_id integer not null
references wh_time_of_day_dimension (id)
on delete restrict
on update cascade,
session_pending_time wh_timestamp,
session_active_date_id integer default -1 not null
references wh_date_dimension (id)
on delete restrict
on update cascade,
session_active_time_id integer default -1 not null
references wh_time_of_day_dimension (id)
on delete restrict
on update cascade,
session_active_time wh_timestamp default 'infinity'::timestamptz,
session_canceling_date_id integer default -1 not null
references wh_date_dimension (id)
on delete restrict
on update cascade,
session_canceling_time_id integer default -1 not null
references wh_time_of_day_dimension (id)
on delete restrict
on update cascade,
session_canceling_time wh_timestamp default 'infinity'::timestamptz,
session_terminated_date_id integer default -1 not null
references wh_date_dimension (id)
on delete restrict
on update cascade,
session_terminated_time_id integer default -1 not null
references wh_time_of_day_dimension (id)
on delete restrict
on update cascade,
session_terminated_time wh_timestamp default 'infinity'::timestamptz,
-- TODO(mgaffney) 09/2020: add columns for session expiration
-- TODO(mgaffney) 09/2020: add connection limit. This may need a dimension
-- table and foreign key column to represent unlimited connections.
-- The total number of connections made during the session.
total_connection_count bigint, -- will be null until the first connection is created
-- The total number of bytes received by workers from the client and sent
-- to the endpoint for this session.
-- total_bytes_up is a fully additive measurement.
total_bytes_up wh_bytes_transmitted, -- will be null until the first connection is closed
-- The total number of bytes received by workers from the endpoint and sent
-- to the client for this session.
-- total_bytes_down is a fully additive measurement.
total_bytes_down wh_bytes_transmitted -- will be null until the first connection is closed
);
-- The wh_session_connection_accumulating_fact table is an accumulating fact table.
-- The grain of the fact table is one row per session connection.
create table wh_session_connection_accumulating_fact (
connection_id wt_public_id primary key,
-- session_id is a degenerate dimension
session_id wt_public_id not null
references wh_session_accumulating_fact (session_id)
on delete cascade
on update cascade,
-- foreign keys to the dimension tables
host_id wh_dim_id not null
references wh_host_dimension (id)
on delete restrict
on update cascade,
user_id wh_dim_id not null
references wh_user_dimension (id)
on delete restrict
on update cascade,
-- TODO(mgaffney) 09/2020: add dimension and foreign key for the connection
-- closed reason
-- date and time foreign keys and timestamps
connection_authorized_date_id integer not null
references wh_date_dimension (id)
on delete restrict
on update cascade,
connection_authorized_time_id integer not null
references wh_time_of_day_dimension (id)
on delete restrict
on update cascade,
connection_authorized_time wh_timestamp,
connection_connected_date_id integer default -1 not null
references wh_date_dimension (id)
on delete restrict
on update cascade,
connection_connected_time_id integer default -1 not null
references wh_time_of_day_dimension (id)
on delete restrict
on update cascade,
connection_connected_time wh_timestamp default 'infinity'::timestamptz,
connection_closed_date_id integer default -1 not null
references wh_date_dimension (id)
on delete restrict
on update cascade,
connection_closed_time_id integer default -1 not null
references wh_time_of_day_dimension (id)
on delete restrict
on update cascade,
connection_closed_time wh_timestamp default 'infinity'::timestamptz,
-- TODO(mgaffney) 09/2020: add a connection_duration_in_seconds column
-- The client address and port are degenerate dimensions
client_tcp_address inet, -- can be null
client_tcp_port_number wh_inet_port, -- can be null
-- The endpoint address and port are degenerate dimensions
endpoint_tcp_address inet, -- can be null
endpoint_tcp_port_number wh_inet_port, -- can be null
-- the connection_count must always be 1
-- this is a common pattern in data warehouse models
-- See The Data Warehouse Toolkit, Third Edition
-- by Ralph Kimball and Margy Ross for more information
connection_count smallint default 1 not null
constraint connection_count_must_be_1
check(connection_count = 1),
-- The total number of bytes received by the worker from the client and sent
-- to the endpoint for this connection.
-- bytes_up is a fully additive measurement.
bytes_up wh_bytes_transmitted, -- can be null
-- The total number of bytes received by the worker from the endpoint and sent
-- to the client for this connection.
-- bytes_down is a fully additive measurement.
bytes_down wh_bytes_transmitted -- can be null
);
-- TODO(mgaffney) 09/2020: Research and test if the comment fields are used by
-- data analysis tools.
comment on table wh_session_connection_accumulating_fact is
'The Wh Session Connection Accumulating Fact table is an accumulating fact table. '
'The grain of the fact table is one row per session connection.';
comment on column wh_session_connection_accumulating_fact.bytes_up is
'Bytes Up is the total number of bytes received by the worker from the '
'client and sent to the endpoint for this connection. Bytes Up is a fully '
'additive measurement.';
comment on column wh_session_connection_accumulating_fact.bytes_down is
'Bytes Down is the total number of bytes received by the worker from the '
'endpoint and sent to the client for this connection. Bytes Down is a fully '
'additive measurement.';
create index on wh_session_connection_accumulating_fact(session_id);
commit;
`),
},
"migrations/69_wh_session_facts.down.sql": {
name: "69_wh_session_facts.down.sql",
bytes: []byte(`
begin;
drop trigger wh_insert_session_connection_state on session_connection_state;
drop function wh_insert_session_connection_state;
drop trigger wh_insert_session_state on session_state;
drop function wh_insert_session_state;
drop trigger wh_update_session_connection on session_connection;
drop function wh_update_session_connection;
drop trigger wh_insert_session_connection on session_connection;
drop function wh_insert_session_connection;
drop trigger wh_insert_session on session;
drop function wh_insert_session;
drop function wh_rollup_connections;
commit;
`),
},
"migrations/69_wh_session_facts.up.sql": {
name: "69_wh_session_facts.up.sql",
bytes: []byte(`
begin;
-- wh_rollup_connections calculates the aggregate values from
-- wh_session_connection_accumulating_fact for p_session_id and updates
-- wh_session_accumulating_fact for p_session_id with those values.
create or replace function wh_rollup_connections(p_session_id wt_public_id)
returns void
as $$
declare
session_row wh_session_accumulating_fact%rowtype;
begin
with
session_totals (session_id, total_connection_count, total_bytes_up, total_bytes_down) as (
select session_id,
sum(connection_count),
sum(bytes_up),
sum(bytes_down)
from wh_session_connection_accumulating_fact
where session_id = p_session_id
group by session_id
)
update wh_session_accumulating_fact
set total_connection_count = session_totals.total_connection_count,
total_bytes_up = session_totals.total_bytes_up,
total_bytes_down = session_totals.total_bytes_down
from session_totals
where wh_session_accumulating_fact.session_id = session_totals.session_id
returning wh_session_accumulating_fact.* into strict session_row;
end;
$$ language plpgsql;
--
-- Session triggers
--
-- wh_insert_session returns an after insert trigger for the session table
-- which inserts a row in wh_session_accumulating_fact for the new session.
-- wh_insert_session also calls the wh_upsert_host and wh_upsert_user
-- functions which can result in new rows in wh_host_dimension and
-- wh_user_dimension respectively.
create or replace function wh_insert_session()
returns trigger
as $$
declare
new_row wh_session_accumulating_fact%rowtype;
begin
with
pending_timestamp (date_dim_id, time_dim_id, ts) as (
select wh_date_id(start_time), wh_time_id(start_time), start_time
from session_state
where session_id = new.public_id
and state = 'pending'
)
insert into wh_session_accumulating_fact (
session_id,
auth_token_id,
host_id,
user_id,
session_pending_date_id,
session_pending_time_id,
session_pending_time
)
select new.public_id,
new.auth_token_id,
wh_upsert_host(new.host_id, new.host_set_id, new.target_id),
wh_upsert_user(new.user_id, new.auth_token_id),
pending_timestamp.date_dim_id,
pending_timestamp.time_dim_id,
pending_timestamp.ts
from pending_timestamp
returning * into strict new_row;
return null;
end;
$$ language plpgsql;
create trigger wh_insert_session
after insert on session
for each row
execute function wh_insert_session();
--
-- Session Connection triggers
--
-- wh_insert_session_connection returns an after insert trigger for the
-- session_connection table which inserts a row in
-- wh_session_connection_accumulating_fact for the new session connection.
-- wh_insert_session_connection also calls wh_rollup_connections which can
-- result in updates to wh_session_accumulating_fact.
create or replace function wh_insert_session_connection()
returns trigger
as $$
declare
new_row wh_session_connection_accumulating_fact%rowtype;
begin
with
authorized_timestamp (date_dim_id, time_dim_id, ts) as (
select wh_date_id(start_time), wh_time_id(start_time), start_time
from session_connection_state
where connection_id = new.public_id
and state = 'authorized'
),
session_dimension (host_dim_id, user_dim_id) as (
select host_id, user_id
from wh_session_accumulating_fact
where session_id = new.session_id
)
insert into wh_session_connection_accumulating_fact (
connection_id,
session_id,
host_id,
user_id,
connection_authorized_date_id,
connection_authorized_time_id,
connection_authorized_time,
client_tcp_address,
client_tcp_port_number,
endpoint_tcp_address,
endpoint_tcp_port_number,
bytes_up,
bytes_down
)
select new.public_id,
new.session_id,
session_dimension.host_dim_id,
session_dimension.user_dim_id,
authorized_timestamp.date_dim_id,
authorized_timestamp.time_dim_id,
authorized_timestamp.ts,
new.client_tcp_address,
new.client_tcp_port,
new.endpoint_tcp_address,
new.endpoint_tcp_port,
new.bytes_up,
new.bytes_down
from authorized_timestamp,
session_dimension
returning * into strict new_row;
perform wh_rollup_connections(new.session_id);
return null;
end;
$$ language plpgsql;
create trigger wh_insert_session_connection
after insert on session_connection
for each row
execute function wh_insert_session_connection();
-- wh_update_session_connection returns an after update trigger for the
-- session_connection table which updates a row in
-- wh_session_connection_accumulating_fact for the session connection.
-- wh_update_session_connection also calls wh_rollup_connections which can
-- result in updates to wh_session_accumulating_fact.
create or replace function wh_update_session_connection()
returns trigger
as $$
declare
updated_row wh_session_connection_accumulating_fact%rowtype;
begin
update wh_session_connection_accumulating_fact
set client_tcp_address = new.client_tcp_address,
client_tcp_port_number = new.client_tcp_port,
endpoint_tcp_address = new.endpoint_tcp_address,
endpoint_tcp_port_number = new.endpoint_tcp_port,
bytes_up = new.bytes_up,
bytes_down = new.bytes_down
where connection_id = new.public_id
returning * into strict updated_row;
perform wh_rollup_connections(new.session_id);
return null;
end;
$$ language plpgsql;
create trigger wh_update_session_connection
after update on session_connection
for each row
execute function wh_update_session_connection();
--
-- Session State trigger
--
-- wh_insert_session_state returns an after insert trigger for the
-- session_state table which updates wh_session_accumulating_fact.
create or replace function wh_insert_session_state()
returns trigger
as $$
declare
date_col text;
time_col text;
ts_col text;
q text;
session_row wh_session_accumulating_fact%rowtype;
begin
if new.state = 'pending' then
-- The pending state is the first state which is handled by the
-- wh_insert_session trigger. The update statement in this trigger will
-- fail for the pending state because the row for the session has not yet
-- been inserted into the wh_session_accumulating_fact table.
return null;
end if;
date_col = 'session_' || new.state || '_date_id';
time_col = 'session_' || new.state || '_time_id';
ts_col = 'session_' || new.state || '_time';
q = format('update wh_session_accumulating_fact
set (%I, %I, %I) = (select wh_date_id(%L), wh_time_id(%L), %L::timestamptz)
where session_id = %L
returning *',
date_col, time_col, ts_col,
new.start_time, new.start_time, new.start_time,
new.session_id);
execute q into strict session_row;
return null;
end;
$$ language plpgsql;
create trigger wh_insert_session_state
after insert on session_state
for each row
execute function wh_insert_session_state();
--
-- Session Connection State trigger
--
-- wh_insert_session_connection_state returns an after insert trigger for the
-- session_connection_state table which updates
-- wh_session_connection_accumulating_fact.
create or replace function wh_insert_session_connection_state()
returns trigger
as $$
declare
date_col text;
time_col text;
ts_col text;
q text;
connection_row wh_session_connection_accumulating_fact%rowtype;
begin
if new.state = 'authorized' then
-- The authorized state is the first state which is handled by the
-- wh_insert_session_connection trigger. The update statement in this
-- trigger will fail for the authorized state because the row for the
-- session connection has not yet been inserted into the
-- wh_session_connection_accumulating_fact table.
return null;
end if;
date_col = 'connection_' || new.state || '_date_id';
time_col = 'connection_' || new.state || '_time_id';
ts_col = 'connection_' || new.state || '_time';
q = format('update wh_session_connection_accumulating_fact
set (%I, %I, %I) = (select wh_date_id(%L), wh_time_id(%L), %L::timestamptz)
where connection_id = %L
returning *',
date_col, time_col, ts_col,
new.start_time, new.start_time, new.start_time,
new.connection_id);
execute q into strict connection_row;
return null;
end;
$$ language plpgsql;
create trigger wh_insert_session_connection_state
after insert on session_connection_state
for each row
execute function wh_insert_session_connection_state();
commit;
`),
},
}