You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
boundary/internal/db/schema/postgres_migration.gen.go

7791 lines
274 KiB

package schema
// Code generated by "make migrations"; DO NOT EDIT.
func init() {
migrationStates["postgres"] = migrationState{
binarySchemaVersion: 13001,
upMigrations: map[int][]byte{
1: []byte(`
create domain wt_public_id as text
check(
length(trim(value)) > 10
);
comment on domain wt_public_id is
'Random ID generated with github.com/hashicorp/go-secure-stdlib/base62';
create domain wt_private_id as text
not null
check(
length(trim(value)) > 10
);
comment on domain wt_private_id is
'Random ID generated with github.com/hashicorp/go-secure-stdlib/base62';
create domain wt_scope_id as text
check(
length(trim(value)) > 10 or value = 'global'
);
comment on domain wt_scope_id is
'"global" or random ID generated with github.com/hashicorp/go-secure-stdlib/base62';
create domain wt_user_id as text
not null
check(
length(trim(value)) > 10 or value = 'u_anon' or value = 'u_auth' or value = 'u_recovery'
);
-- fix: this comment targets the wt_user_id domain created just above, not
-- wt_scope_id (whose comment it was silently overwriting). Also list
-- 'u_recovery', which the domain's check constraint accepts.
comment on domain wt_user_id is
'"u_anon", "u_auth", "u_recovery", or random ID generated with github.com/hashicorp/go-secure-stdlib/base62';
create domain wt_role_id as text
not null
check(
length(trim(value)) > 10
);
-- fix: this comment targets the wt_role_id domain created just above, not
-- wt_scope_id (whose comment it was silently overwriting).
comment on domain wt_role_id is
'Random ID generated with github.com/hashicorp/go-secure-stdlib/base62';
create domain wt_timestamp as
timestamp with time zone
default current_timestamp;
comment on domain wt_timestamp is
'Standard timestamp for all create_time and update_time columns';
create or replace function
update_time_column()
returns trigger
as $$
begin
if row(new.*) is distinct from row(old.*) then
new.update_time = now();
return new;
else
return old;
end if;
end;
$$ language plpgsql;
comment on function
update_time_column()
is
'function used in before update triggers to properly set update_time columns';
create or replace function
default_create_time()
returns trigger
as $$
begin
if new.create_time is distinct from now() then
raise warning 'create_time cannot be set to %', new.create_time;
new.create_time = now();
end if;
return new;
end;
$$ language plpgsql;
comment on function
default_create_time()
is
'function used in before insert triggers to set create_time column to now';
create domain wt_version as bigint
default 1
not null
check(
value > 0
);
comment on domain wt_version is
'standard column for row version';
-- update_version_column() will increment the version column whenever row data
-- is updated and should only be used in an update after trigger. This function
-- will overwrite any explicit updates to the version column. The function
-- accepts an optional parameter of 'private_id' for the table's primary key.
create or replace function
update_version_column()
returns trigger
as $$
begin
if pg_trigger_depth() = 1 then
if row(new.*) is distinct from row(old.*) then
if tg_nargs = 0 then
execute format('update %I set version = $1 where public_id = $2', tg_relid::regclass) using old.version+1, new.public_id;
new.version = old.version + 1;
return new;
end if;
if tg_argv[0] = 'private_id' then
execute format('update %I set version = $1 where private_id = $2', tg_relid::regclass) using old.version+1, new.private_id;
new.version = old.version + 1;
return new;
end if;
end if;
end if;
return new;
end;
$$ language plpgsql;
comment on function
update_version_column()
is
'function used in after update triggers to properly set version columns';
-- immutable_columns() will make the column names immutable which are passed as
-- parameters when the trigger is created. It raises error code 23601 which is a
-- class 23 integrity constraint violation: immutable column
create or replace function
immutable_columns()
returns trigger
as $$
declare
col_name text;
new_value text;
old_value text;
begin
foreach col_name in array tg_argv loop
execute format('SELECT $1.%I', col_name) into new_value using new;
execute format('SELECT $1.%I', col_name) into old_value using old;
if new_value is distinct from old_value then
raise exception 'immutable column: %.%', tg_table_name, col_name using
errcode = '23601',
schema = tg_table_schema,
table = tg_table_name,
column = col_name;
end if;
end loop;
return new;
end;
$$ language plpgsql;
comment on function
immutable_columns()
is
'function used in before update triggers to make columns immutable';
`),
2: []byte(`
-- TODO (jimlambrt 7/2020) remove update_time
create table if not exists oplog_entry (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
version text not null,
aggregate_name text not null,
"data" bytea not null
);
create trigger
update_time_column
before
update on oplog_entry
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on oplog_entry
for each row execute procedure default_create_time();
-- oplog_entry is immutable.
create trigger
immutable_columns
before
update on oplog_entry
for each row execute procedure immutable_columns('id','update_time','create_time','version','aggregate_name', 'data');
create table if not exists oplog_ticket (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
"name" text not null unique,
"version" bigint not null
);
create trigger
update_time_column
before
update on oplog_ticket
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on oplog_ticket
for each row execute procedure default_create_time();
-- oplog_ticket: only allow updates to: version and update_time
create trigger
immutable_columns
before
update on oplog_ticket
for each row execute procedure immutable_columns('id','create_time','name');
-- TODO (jimlambrt 7/2020) remove update_time
create table if not exists oplog_metadata (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
entry_id bigint not null references oplog_entry(id) on delete cascade on update cascade,
"key" text not null,
value text null
);
create trigger
update_time_column
before
update on oplog_metadata
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on oplog_metadata
for each row execute procedure default_create_time();
-- oplog_metadata is immutable
create trigger
immutable_columns
before
update on oplog_metadata
for each row execute procedure immutable_columns('id','create_time','update_time','entry_id','key','value');
create index if not exists idx_oplog_metatadata_key on oplog_metadata(key);
create index if not exists idx_oplog_metatadata_value on oplog_metadata(value);
insert into oplog_ticket (name, version)
values
('auth_token', 1),
('default', 1),
('iam_scope', 1),
('iam_user', 1),
('iam_group', 1),
('iam_group_member', 1),
('iam_role', 1),
('iam_role_grant', 1),
('iam_group_role', 1),
('iam_user_role', 1),
('db_test_user', 1),
('db_test_car', 1),
('db_test_rental', 1),
('db_test_scooter', 1),
('auth_account', 1),
('iam_principal_role', 1);
`),
3: []byte(`
-- create test tables used in the unit tests for the internal/db package
-- these tables (db_test_user, db_test_car, db_test_rental, db_test_scooter) are
-- not part of the boundary domain model... they are simply used for testing
-- the internal/db package
create table if not exists db_test_user (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
public_id text not null unique,
name text unique,
phone_number text,
email text,
version wt_version
);
create trigger
update_time_column
before
update on db_test_user
for each row execute procedure update_time_column();
-- define the immutable fields for db_test_user
create trigger
immutable_columns
before
update on db_test_user
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on db_test_user
for each row execute procedure default_create_time();
create trigger
update_version_column
after update on db_test_user
for each row execute procedure update_version_column();
create table if not exists db_test_car (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
public_id text not null unique,
name text unique,
model text,
mpg smallint
);
create trigger
update_time_column
before
update on db_test_car
for each row execute procedure update_time_column();
-- define the immutable fields for db_test_car
create trigger
immutable_columns
before
update on db_test_car
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on db_test_car
for each row execute procedure default_create_time();
create table if not exists db_test_rental (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
public_id text not null unique,
name text unique,
user_id bigint not null references db_test_user(id),
car_id bigint not null references db_test_car(id)
);
create trigger
update_time_column
before
update on db_test_rental
for each row execute procedure update_time_column();
-- define the immutable fields for db_test_rental
create trigger
immutable_columns
before
update on db_test_rental
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on db_test_rental
for each row execute procedure default_create_time();
create table if not exists db_test_scooter (
id bigint generated always as identity primary key,
create_time wt_timestamp,
update_time wt_timestamp,
private_id text not null unique,
name text unique,
model text,
mpg smallint
);
create trigger
update_time_column
before
update on db_test_scooter
for each row execute procedure update_time_column();
-- define the immutable fields for db_test_scooter
create trigger
immutable_columns
before
update on db_test_scooter
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on db_test_scooter
for each row execute procedure default_create_time();
`),
6: []byte(`
create table iam_scope_type_enm (
string text not null primary key
constraint only_predefined_scope_types_allowed
check(string in ('unknown', 'global', 'org', 'project'))
);
insert into iam_scope_type_enm (string)
values
('unknown'),
('global'),
('org'),
('project');
-- define the immutable fields of iam_scope_type_enm
create trigger
immutable_columns
before
update on iam_scope_type_enm
for each row execute procedure immutable_columns('string');
create table iam_scope (
public_id wt_scope_id primary key,
create_time wt_timestamp,
update_time wt_timestamp,
name text,
type text not null
references iam_scope_type_enm(string)
constraint only_known_scope_types_allowed
check(
(
type = 'global'
and parent_id is null
)
or (
type = 'org'
and parent_id = 'global'
)
or (
type = 'project'
and parent_id is not null
and parent_id != 'global'
)
),
description text,
parent_id text references iam_scope(public_id) on delete cascade on update cascade,
-- version allows optimistic locking of the role when modifying the role
-- itself and when modifying dependent items like principal roles.
version wt_version
);
create table iam_scope_global (
scope_id wt_scope_id primary key
references iam_scope(public_id)
on delete cascade
on update cascade
constraint only_one_global_scope_allowed
check(
scope_id = 'global'
),
name text unique
);
create table iam_scope_org (
scope_id wt_scope_id primary key
references iam_scope(public_id)
on delete cascade
on update cascade,
parent_id wt_scope_id
not null
references iam_scope_global(scope_id)
on delete cascade
on update cascade,
name text,
unique (parent_id, name)
);
create table iam_scope_project (
scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
parent_id wt_public_id not null references iam_scope_org(scope_id) on delete cascade on update cascade,
name text,
unique(parent_id, name),
unique(scope_id),
primary key(scope_id, parent_id)
);
-- iam_sub_scopes_func is an after-insert trigger function on iam_scope that
-- inserts the matching subtype row (iam_scope_global / iam_scope_org /
-- iam_scope_project) for each new scope. Raises for any unknown scope type.
create or replace function
iam_sub_scopes_func()
returns trigger
as $$
begin
if new.type = 'global' then
insert into iam_scope_global (scope_id, name)
values
(new.public_id, new.name);
return new;
end if;
if new.type = 'org' then
insert into iam_scope_org (scope_id, parent_id, name)
values
(new.public_id, new.parent_id, new.name);
return new;
end if;
if new.type = 'project' then
insert into iam_scope_project (scope_id, parent_id, name)
values
(new.public_id, new.parent_id, new.name);
return new;
end if;
raise exception 'unknown scope type';
end;
$$ language plpgsql;
create trigger
iam_scope_insert
after
insert on iam_scope
for each row execute procedure iam_sub_scopes_func();
create or replace function
disallow_global_scope_deletion()
returns trigger
as $$
begin
if old.type = 'global' then
raise exception 'deletion of global scope not allowed';
end if;
return old;
end;
$$ language plpgsql;
create trigger
iam_scope_disallow_global_deletion
before
delete on iam_scope
for each row execute procedure disallow_global_scope_deletion();
create trigger
update_time_column
before update on iam_scope
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on iam_scope
for each row execute procedure default_create_time();
create trigger
update_version_column
after update on iam_scope
for each row execute procedure update_version_column();
-- define the immutable fields for iam_scope
create trigger
immutable_columns
before
update on iam_scope
for each row execute procedure immutable_columns('public_id', 'create_time', 'type', 'parent_id');
-- define the immutable fields of iam_scope_global
create trigger
immutable_columns
before
update on iam_scope_global
for each row execute procedure immutable_columns('scope_id');
-- define the immutable fields of iam_scope_org
create trigger
immutable_columns
before
update on iam_scope_org
for each row execute procedure immutable_columns('scope_id');
-- define the immutable fields of iam_scope_project
create trigger
immutable_columns
before
update on iam_scope_project
for each row execute procedure immutable_columns('scope_id');
-- iam_sub_names will allow us to enforce the different name constraints for
-- orgs and projects via a before update trigger on the iam_scope
-- table.
create or replace function
iam_sub_names()
returns trigger
as $$
begin
if new.name != old.name then
if new.type = 'global' then
update iam_scope_global set name = new.name where scope_id = old.public_id;
return new;
end if;
if new.type = 'org' then
update iam_scope_org set name = new.name where scope_id = old.public_id;
return new;
end if;
if new.type = 'project' then
update iam_scope_project set name = new.name where scope_id = old.public_id;
return new;
end if;
raise exception 'unknown scope type';
end if;
return new;
end;
$$ language plpgsql;
create trigger
iam_sub_names
before
update on iam_scope
for each row execute procedure iam_sub_names();
insert into iam_scope (public_id, name, type, description)
values ('global', 'global', 'global', 'Global Scope');
create table iam_user (
public_id wt_user_id
primary key,
create_time wt_timestamp,
update_time wt_timestamp,
name text,
description text,
scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
unique(name, scope_id),
version wt_version,
-- The order of columns is important for performance. See:
-- https://dba.stackexchange.com/questions/58970/enforcing-constraints-two-tables-away/58972#58972
-- https://dba.stackexchange.com/questions/27481/is-a-composite-index-also-good-for-queries-on-the-first-field
unique(scope_id, public_id)
);
create or replace function
user_scope_id_valid()
returns trigger
as $$
begin
perform from iam_scope where public_id = new.scope_id and type in ('global', 'org');
if not found then
raise exception 'invalid scope type for user creation';
end if;
return new;
end;
$$ language plpgsql;
create or replace function
grant_scope_id_valid()
returns trigger
as $$
declare parent_scope_id text;
declare role_scope_type text;
begin
-- There is a not-null constraint so ensure that if the value passed in is
-- empty we simply set to the scope ID
if new.grant_scope_id = '' or new.grant_scope_id is null then
new.grant_scope_id = new.scope_id;
end if;
-- If the scopes match, it's allowed
if new.grant_scope_id = new.scope_id then
return new;
end if;
-- Fetch the type of scope
select isc.type from iam_scope isc where isc.public_id = new.scope_id into role_scope_type;
-- Always allowed
if role_scope_type = 'global' then
return new;
end if;
-- Never allowed; the case where it's set to the same scope ID as the project
-- itself is covered above
if role_scope_type = 'project' then
raise exception 'invalid to set grant_scope_id to non-same scope_id when role scope type is project';
end if;
if role_scope_type = 'org' then
-- Look up the parent scope ID for the scope ID given
select isc.parent_id from iam_scope isc where isc.public_id = new.grant_scope_id into parent_scope_id;
-- Allow iff the grant scope ID's parent matches the role's scope ID; that
-- is, match if the role belongs to a direct child scope of this
-- org
if parent_scope_id = new.scope_id then
return new;
end if;
raise exception 'grant_scope_id is not a child project of the role scope';
end if;
raise exception 'unknown scope type';
end;
$$ language plpgsql;
create or replace function
disallow_iam_predefined_user_deletion()
returns trigger
as $$
begin
if old.public_id = 'u_anon' then
raise exception 'deletion of anonymous user not allowed';
end if;
if old.public_id = 'u_auth' then
raise exception 'deletion of authenticated user not allowed';
end if;
if old.public_id = 'u_recovery' then
raise exception 'deletion of recovery user not allowed';
end if;
return old;
end;
$$ language plpgsql;
create trigger
update_version_column
after update on iam_user
for each row execute procedure update_version_column();
create trigger
ensure_user_scope_id_valid
before
insert or update on iam_user
for each row execute procedure user_scope_id_valid();
create trigger
update_time_column
before update on iam_user
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on iam_user
for each row execute procedure default_create_time();
create trigger
iam_user_disallow_predefined_user_deletion
before
delete on iam_user
for each row execute procedure disallow_iam_predefined_user_deletion();
-- TODO: Do we want to disallow changing the name or description?
insert into iam_user (public_id, name, description, scope_id)
values ('u_anon', 'anonymous', 'The anonymous user matches any request, whether authenticated or not', 'global');
insert into iam_user (public_id, name, description, scope_id)
values ('u_auth', 'authenticated', 'The authenticated user matches any user that has a valid token', 'global');
insert into iam_user (public_id, name, description, scope_id)
values ('u_recovery', 'recovery', 'The recovery user is used for any request that was performed with the recovery KMS workflow', 'global');
-- define the immutable fields for iam_user
create trigger
immutable_columns
before
update on iam_user
for each row execute procedure immutable_columns('public_id', 'create_time', 'scope_id');
create table iam_role (
public_id wt_role_id primary key,
create_time wt_timestamp,
update_time wt_timestamp,
name text,
description text,
scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
grant_scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
unique(name, scope_id),
version wt_version,
-- add unique index so a composite fk can be declared.
unique(scope_id, public_id)
);
-- Grants are immutable, which is enforced via the trigger below
create table iam_role_grant (
create_time wt_timestamp,
role_id wt_role_id -- pk
references iam_role(public_id)
on delete cascade
on update cascade,
canonical_grant text -- pk
constraint canonical_grant_must_not_be_empty
check(
length(trim(canonical_grant)) > 0
),
raw_grant text not null
constraint raw_grant_must_not_be_empty
check(
length(trim(raw_grant)) > 0
),
primary key(role_id, canonical_grant)
);
-- iam_immutable_role_grant() ensures that grants assigned to roles are immutable.
create or replace function
iam_immutable_role_grant()
returns trigger
as $$
begin
raise exception 'role grants are immutable';
end;
$$ language plpgsql;
create trigger immutable_role_grant
before
update on iam_role_grant
for each row execute procedure iam_immutable_role_grant();
create trigger
default_create_time_column
before
insert on iam_role_grant
for each row execute procedure default_create_time();
create trigger
update_version_column
after update on iam_role
for each row execute procedure update_version_column();
create trigger
update_time_column
before update on iam_role
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on iam_role
for each row execute procedure default_create_time();
create trigger
ensure_grant_scope_id_valid
before
insert or update on iam_role
for each row execute procedure grant_scope_id_valid();
-- define the immutable fields for iam_role (started trigger name with "a_" so
-- it will run first)
create trigger
a_immutable_columns
before
update on iam_role
for each row execute procedure immutable_columns('public_id', 'create_time', 'scope_id');
-- recovery_user_not_allowed is a before-insert trigger function that rejects
-- the reserved 'u_recovery' user id in the column named by the trigger's
-- first argument (tg_argv[0]).
create or replace function
recovery_user_not_allowed()
returns trigger
as $$
declare
new_value text;
begin
execute format('SELECT $1.%I', tg_argv[0]) into new_value using new;
if new_value = 'u_recovery' then
-- fix: removed stray trailing double-quote from the error message
raise exception '"u_recovery" not allowed here';
end if;
return new;
end;
$$ language plpgsql;
create table iam_group (
public_id wt_public_id
primary key,
create_time wt_timestamp,
update_time wt_timestamp,
name text,
description text,
scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
unique(name, scope_id),
-- version allows optimistic locking of the group when modifying the group
-- itself and when modifying dependent items like group members.
version wt_version,
-- add unique index so a composite fk can be declared.
unique(scope_id, public_id)
);
create trigger
update_version_column
after update on iam_group
for each row execute procedure update_version_column();
create trigger
update_time_column
before update on iam_group
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on iam_group
for each row execute procedure default_create_time();
-- define the immutable fields for iam_group
create trigger
immutable_columns
before
update on iam_group
for each row execute procedure immutable_columns('public_id', 'create_time', 'scope_id');
-- iam_user_role contains roles that have been assigned to users. Users can be
-- from any scope. The rows in this table must be immutable after insert, which
-- will be ensured with a before update trigger using
-- iam_immutable_role_principal().
create table iam_user_role (
create_time wt_timestamp,
role_id wt_role_id
references iam_role(public_id)
on delete cascade
on update cascade,
principal_id wt_user_id
references iam_user(public_id)
on delete cascade
on update cascade,
primary key (role_id, principal_id)
);
-- iam_group_role contains roles that have been assigned to groups.
-- Groups can be from any scope. The rows in this table must be immutable after
-- insert, which will be ensured with a before update trigger using
-- iam_immutable_role_principal().
create table iam_group_role (
create_time wt_timestamp,
role_id wt_role_id
references iam_role(public_id)
on delete cascade
on update cascade,
principal_id wt_public_id
references iam_group(public_id)
on delete cascade
on update cascade,
primary key (role_id, principal_id)
);
-- get_scoped_principal_id is used by the iam_principal_role view as a convenient
-- way to create <scope_id>:<principal_id> to reference principals from
-- other scopes than the role's scope.
create or replace function get_scoped_principal_id(role_scope text, principal_scope text, principal_id text) returns text
as $$
begin
if role_scope = principal_scope then
return principal_id;
end if;
return principal_scope || ':' || principal_id;
end;
$$ language plpgsql;
-- iam_principal_role provides a consolidated view of all principal roles assigned
-- (user and group roles).
-- REPLACED in 9/04_oidc_managed_group_principal_role
create view iam_principal_role as
select
ur.create_time,
ur.principal_id,
ur.role_id,
u.scope_id as principal_scope_id,
r.scope_id as role_scope_id,
get_scoped_principal_id(r.scope_id, u.scope_id, ur.principal_id) as scoped_principal_id,
'user' as type
from
iam_user_role ur,
iam_role r,
iam_user u
where
ur.role_id = r.public_id and
u.public_id = ur.principal_id
union
select
gr.create_time,
gr.principal_id,
gr.role_id,
g.scope_id as principal_scope_id,
r.scope_id as role_scope_id,
get_scoped_principal_id(r.scope_id, g.scope_id, gr.principal_id) as scoped_principal_id,
'group' as type
from
iam_group_role gr,
iam_role r,
iam_group g
where
gr.role_id = r.public_id and
g.public_id = gr.principal_id;
-- iam_immutable_role_principal() ensures that roles assigned to principals are immutable.
create or replace function
iam_immutable_role_principal()
returns trigger
as $$
begin
raise exception 'roles are immutable';
end;
$$ language plpgsql;
create trigger immutable_role_principal
before
update on iam_user_role
for each row execute procedure iam_immutable_role_principal();
create trigger
recovery_user_not_allowed_user_role
before
insert on iam_user_role
for each row execute procedure recovery_user_not_allowed('principal_id');
create trigger
default_create_time_column
before
insert on iam_user_role
for each row execute procedure default_create_time();
create trigger immutable_role_principal
before
update on iam_group_role
for each row execute procedure iam_immutable_role_principal();
create trigger
default_create_time_column
before
insert on iam_group_role
for each row execute procedure default_create_time();
-- iam_group_member_user is an association table that represents groups with
-- associated users.
create table iam_group_member_user (
create_time wt_timestamp,
group_id wt_public_id references iam_group(public_id) on delete cascade on update cascade,
member_id wt_user_id references iam_user(public_id) on delete cascade on update cascade,
primary key (group_id, member_id)
);
-- iam_immutable_group_member() ensures that group members are immutable.
create or replace function
iam_immutable_group_member()
returns trigger
as $$
begin
raise exception 'group members are immutable';
end;
$$ language plpgsql;
create trigger
default_create_time_column
before
insert on iam_group_member_user
for each row execute procedure default_create_time();
create trigger iam_immutable_group_member
before
update on iam_group_member_user
for each row execute procedure iam_immutable_group_member();
create trigger
recovery_user_not_allowed_group_member
before
insert on iam_group_member_user
for each row execute procedure recovery_user_not_allowed('member_id');
-- get_scoped_member_id is used by the iam_group_member view as a convenient
-- way to create <scope_id>:<member_id> to reference members from
-- other scopes than the group's scope.
create or replace function get_scoped_member_id(group_scope text, member_scope text, member_id text) returns text
as $$
begin
if group_scope = member_scope then
return member_id;
end if;
return member_scope || ':' || member_id;
end;
$$ language plpgsql;
-- iam_group_member provides a consolidated view of group members.
create view iam_group_member as
select
gm.create_time,
gm.group_id,
gm.member_id,
u.scope_id as member_scope_id,
g.scope_id as group_scope_id,
get_scoped_member_id(g.scope_id, u.scope_id, gm.member_id) as scoped_member_id,
'user' as type
from
iam_group_member_user gm,
iam_user u,
iam_group g
where
gm.member_id = u.public_id and
gm.group_id = g.public_id;
`),
7: []byte(`
/*
┌────────────────┐ ┌────────────────┐
│ iam_scope │ │ auth_method │
├────────────────┤ ├────────────────┤
│ public_id (pk) │ ╱│ public_id (pk) │
│ │┼┼───────────○─│ scope_id (fk) │
│ │ ╲│ │
└────────────────┘ └────────────────┘
┼ ┼
┼ ┼
│ │
│ │ ▲fk1
│ │
○ ○
╱│╲ ╱│╲
┌────────────────┐ ┌──────────────────────────┐
│ iam_user │ │ auth_account │
├────────────────┤ ├──────────────────────────┤
│ public_id (pk) │ │ public_id (pk) │
│ scope_id (fk) │ ◀fk2 │ scope_id (fk1) │
│ │┼○──────○┼│ auth_method_id (fk1) │
│ │ │ iam_user_scope_id (fk2) │
└────────────────┘ │ iam_user_id (fk2) │
└──────────────────────────┘
An iam_scope can have 0 to many iam_users.
An iam_scope can have 0 to many auth_methods.
An iam_user belongs to 1 iam_scope.
An auth_method belongs to 1 iam_scope.
An iam_user can have 0 or 1 auth_account.
An auth_account belongs to 0 or 1 iam_user.
An auth_method can have 0 to many auth_accounts.
An auth_account belongs to 1 auth_method.
An auth_account can only be associated with an iam_user in the same scope of
the auth_account's auth_method. Including scope_id in fk1 and fk2 ensures this
restriction is not violated.
Design influenced by:
https://dba.stackexchange.com/questions/58970/enforcing-constraints-two-tables-away/58972#58972
*/
-- base table for auth methods
create table auth_method (
public_id wt_public_id
primary key,
scope_id wt_scope_id
not null
references iam_scope(public_id)
on delete cascade
on update cascade,
-- The order of columns is important for performance. See:
-- https://dba.stackexchange.com/questions/58970/enforcing-constraints-two-tables-away/58972#58972
-- https://dba.stackexchange.com/questions/27481/is-a-composite-index-also-good-for-queries-on-the-first-field
unique(scope_id, public_id)
);
-- base table for auth accounts
create table auth_account (
public_id wt_public_id
primary key,
auth_method_id wt_public_id
not null,
scope_id wt_scope_id
not null,
iam_user_id wt_public_id,
-- The auth_account can only be assigned to an iam_user in the same scope as
-- the auth_method the auth_account belongs to. A separate column for
-- iam_user's scope id is needed because using the scope_id column in the
-- foreign key constraint causes an error when the iam_user is deleted but
-- the auth_account still exists. This is a valid scenario since the
-- lifetime of the auth_account is tied to the auth_method not the iam_user.
iam_user_scope_id wt_scope_id,
constraint user_and_auth_account_in_same_scope
check(
(iam_user_id is null and iam_user_scope_id is null)
or
(iam_user_id is not null and (iam_user_scope_id = scope_id))
),
-- including scope_id in fk1 and fk2 ensures the scope_id of the owning
-- auth_method and the scope_id of the owning iam_user are the same
foreign key (scope_id, auth_method_id) -- fk1
references auth_method (scope_id, public_id)
on delete cascade
on update cascade,
foreign key (iam_user_scope_id, iam_user_id) -- fk2
references iam_user (scope_id, public_id)
on delete set null
on update cascade,
unique(scope_id, auth_method_id, public_id)
);
create or replace function
insert_auth_method_subtype()
returns trigger
as $$
begin
insert into auth_method
(public_id, scope_id)
values
(new.public_id, new.scope_id);
return new;
end;
$$ language plpgsql;
create or replace function
insert_auth_account_subtype()
returns trigger
as $$
begin
select auth_method.scope_id
into new.scope_id
from auth_method
where auth_method.public_id = new.auth_method_id;
insert into auth_account
(public_id, auth_method_id, scope_id)
values
(new.public_id, new.auth_method_id, new.scope_id);
return new;
end;
$$ language plpgsql;
-- update_iam_user_auth_account is a before update trigger on the auth_account
-- table. If the new.iam_user_id column is different from the old.iam_user_id
-- column, update_iam_user_auth_account retrieves the scope id of the iam user
-- and sets new.iam_user_scope_id to that value. If the new.iam_user_id column
-- is null and the old.iam_user_id column is not null,
-- update_iam_user_auth_account sets the iam_user_scope_id to null.
create or replace function
update_iam_user_auth_account()
returns trigger
as $$
begin
if new.iam_user_id is distinct from old.iam_user_id then
if new.iam_user_id is null then
new.iam_user_scope_id = null;
else
select iam_user.scope_id into new.iam_user_scope_id
from iam_user
where iam_user.public_id = new.iam_user_id;
end if;
end if;
return new;
end;
$$ language plpgsql;
create trigger update_iam_user_auth_account
before update of iam_user_id on auth_account
for each row
execute procedure update_iam_user_auth_account();
`),
8: []byte(`
-- For now at least the IDs will be the same as the name, because this allows us
-- to not have to persist some generated ID to worker and controller nodes.
-- Eventually we may want them to diverge, so we have both here for now.
create table server (
private_id text,
type text,
name text not null unique
constraint server_name_must_not_be_empty
check(length(trim(name)) > 0),
description text,
address text,
create_time wt_timestamp,
update_time wt_timestamp,
primary key (private_id, type)
);
create trigger
immutable_columns
before
update on server
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on server
for each row execute procedure default_create_time();
create table recovery_nonces (
nonce text
primary key,
create_time wt_timestamp
);
create trigger
default_create_time_column
before
insert on recovery_nonces
for each row execute procedure default_create_time();
create trigger
immutable_columns
before
update on recovery_nonces
for each row execute procedure immutable_columns('nonce', 'create_time');
`),
11: []byte(`
-- an auth token belongs to 1 and only 1 auth account
-- an auth account can have 0 to many auth tokens
create table auth_token (
public_id wt_public_id primary key,
token bytea not null unique,
-- TODO: Make key_id a foreign key once we have DEKs
key_id text not null
constraint key_id_must_not_be_empty
check(length(trim(key_id)) > 0),
auth_account_id wt_public_id not null
references auth_account(public_id)
on delete cascade
on update cascade,
create_time wt_timestamp,
update_time wt_timestamp,
-- This column is not updated every time this auth token is accessed.
-- It is updated after X minutes from the last time it was updated on
-- a per row basis.
approximate_last_access_time wt_timestamp
constraint last_access_time_must_not_be_after_expiration_time
check(
approximate_last_access_time <= expiration_time
),
expiration_time wt_timestamp
constraint create_time_must_not_be_after_expiration_time
check(
create_time <= expiration_time
)
);
create view auth_token_account as
select at.public_id,
at.token,
at.auth_account_id,
at.create_time,
at.update_time,
at.approximate_last_access_time,
at.expiration_time,
aa.scope_id,
aa.iam_user_id,
aa.auth_method_id
from auth_token as at
inner join auth_account as aa
on at.auth_account_id = aa.public_id;
create or replace function
update_last_access_time()
returns trigger
as $$
begin
if new.approximate_last_access_time is distinct from old.approximate_last_access_time then
new.approximate_last_access_time = now();
end if;
return new;
end;
$$ language plpgsql;
comment on function
update_last_access_time()
is
'function used in before update triggers to properly set last_access_time columns';
create or replace function
immutable_auth_token_columns()
returns trigger
as $$
begin
if new.auth_account_id is distinct from old.auth_account_id then
raise exception 'auth_account_id is read-only';
end if;
if new.token is distinct from old.token then
raise exception 'token is read-only';
end if;
return new;
end;
$$ language plpgsql;
comment on function
immutable_auth_token_columns()
is
'function used in before update triggers to make specific columns immutable';
-- This allows the expiration to be calculated on the server side and still hold the constraint that
-- the expiration time cant be before the creation time of the auth token.
create or replace function
expire_time_not_older_than_token()
returns trigger
as $$
begin
if new.expiration_time < new.create_time then
new.expiration_time = new.create_time;
end if;
return new;
end;
$$ language plpgsql;
comment on function
expire_time_not_older_than_token()
is
'function used in before insert triggers to ensure expiration time is not older than create time';
create trigger
default_create_time_column
before insert on auth_token
for each row execute procedure default_create_time();
create trigger
expire_time_not_older_than_token
before insert on auth_token
for each row execute procedure expire_time_not_older_than_token();
create trigger
update_time_column
before update on auth_token
for each row execute procedure update_time_column();
create trigger
update_last_access_time
before update on auth_token
for each row execute procedure update_last_access_time();
create trigger
immutable_auth_token_columns
before update on auth_token
for each row execute procedure immutable_auth_token_columns();
create trigger
immutable_columns
before
update on auth_token
for each row execute procedure immutable_columns('public_id', 'auth_account_id', 'create_time');
`),
12: []byte(`
/*
┌────────────────┐ ┌──────────────────────┐ ┌────────────────────────────┐
│ auth_method │ │ auth_password_method │ │ auth_password_conf │
├────────────────┤ ├──────────────────────┤ ├────────────────────────────┤
│ public_id (pk) │ │ public_id (pk,fk) │ ╱│ private_id (pk,fk) │
│ scope_id (fk) │┼┼─────────────○┼│ scope_id (fk) │┼┼─────────○─│ password_method_id (fk) │
│ │ │ ... │ ╲│ │
└────────────────┘ └──────────────────────┘ └────────────────────────────┘
┼ ┼ ┼
┼ ┼ ┼
│ │ │
│ ▲fk1 │ ▲fk1 │ ▲fk1
│ │ │
○ ○ ○
╱│╲ ╱│╲ ╱│╲
┌──────────────────────────┐ ┌──────────────────────────┐ ┌───────────────────────────────┐
│ auth_account │ │ auth_password_account │ │ auth_password_credential │
├──────────────────────────┤ ├──────────────────────────┤ ├───────────────────────────────┤
│ public_id (pk) │ │ public_id (pk,fk2) │ │ private_id (pk) │
│ scope_id (fk1) │ ◀fk2 │ scope_id (fk1,fk2) │ ◀fk2 │ password_method_id (fk1,fk2) │
│ auth_method_id (fk1) │┼┼──────○┼│ auth_method_id (fk1,fk2) │┼┼──────○┼│ password_conf_id (fk1) │
│ iam_user_scope_id (fk2) │ │ ... │ │ password_account_id (fk2) │
│ iam_user_id (fk2) │ └──────────────────────────┘ └───────────────────────────────┘
└──────────────────────────┘
An auth_method is a base type. An auth_password_method is an auth_method
subtype. For every row in auth_password_method there is one row in auth_method
with the same public_id and scope_id.
Similarly, an auth_account is a base type. An auth_password_account is an
auth_account subtype. For every row in auth_password_account there is one row
in auth_account with the same public_id, scope_id, and auth_method_id.
Both auth_password_conf and auth_password_credential are base types. Each
password key derivation function will require a auth_password_conf and
auth_password_credential table.
An auth_method can have 0 or 1 auth_password_method.
An auth_account can have 0 or 1 auth_password_account.
An auth_password_method belongs to 1 auth_method.
An auth_password_method can have 0 to many auth_password_accounts.
An auth_password_method can have 0 to many auth_password_confs.
An auth_password_account belongs to 1 auth_account.
An auth_password_account belongs to 1 auth_password_method.
An auth_password_account can have 0 or 1 auth_password_credential.
An auth_password_conf belongs to 1 auth_password_method.
An auth_password_conf can have 0 to many auth_password_credentials.
An auth_password_credential belongs to 1 auth_password_account.
An auth_password_credential belongs to 1 auth_password_conf.
*/
create table auth_password_method (
public_id wt_public_id
primary key,
scope_id wt_scope_id
not null,
password_conf_id wt_private_id, -- FK to auth_password_conf added below
name text,
description text,
create_time wt_timestamp,
update_time wt_timestamp,
min_login_name_length int
not null
default 3,
min_password_length int
not null
default 8,
version wt_version,
foreign key (scope_id, public_id)
references auth_method (scope_id, public_id)
on delete cascade
on update cascade,
unique(scope_id, name),
unique(scope_id, public_id)
);
create trigger
update_version_column
after update on auth_password_method
for each row execute procedure update_version_column();
create trigger
insert_auth_method_subtype
before insert on auth_password_method
for each row execute procedure insert_auth_method_subtype();
create table auth_password_account (
public_id wt_public_id
primary key,
auth_method_id wt_public_id
not null,
-- NOTE(mgaffney): The scope_id type is not wt_scope_id because the domain
-- check is executed before the insert trigger which retrieves the scope_id
-- causing an insert to fail.
scope_id text not null,
name text,
description text,
create_time wt_timestamp,
update_time wt_timestamp,
login_name text not null
constraint login_name_must_be_lowercase
check(lower(trim(login_name)) = login_name)
constraint login_name_must_not_be_empty
check(length(trim(login_name)) > 0),
version wt_version,
foreign key (scope_id, auth_method_id)
references auth_password_method (scope_id, public_id)
on delete cascade
on update cascade,
foreign key (scope_id, auth_method_id, public_id)
references auth_account (scope_id, auth_method_id, public_id)
on delete cascade
on update cascade,
unique(auth_method_id, name),
unique(auth_method_id, login_name),
unique(auth_method_id, public_id)
);
create trigger
update_version_column
after update on auth_password_account
for each row execute procedure update_version_column();
create trigger
insert_auth_account_subtype
before insert on auth_password_account
for each row execute procedure insert_auth_account_subtype();
create table auth_password_conf (
private_id wt_private_id
primary key,
password_method_id wt_public_id
not null
references auth_password_method (public_id)
on delete cascade
on update cascade
deferrable initially deferred,
unique(password_method_id, private_id)
);
alter table auth_password_method
add constraint current_conf_fkey
foreign key (public_id, password_conf_id)
references auth_password_conf (password_method_id, private_id)
on delete cascade
on update cascade
deferrable initially deferred;
-- insert_auth_password_conf_subtype() is a trigger function for subtypes of
-- auth_password_conf
create or replace function
insert_auth_password_conf_subtype()
returns trigger
as $$
begin
insert into auth_password_conf
(private_id, password_method_id)
values
(new.private_id, new.password_method_id);
return new;
end;
$$ language plpgsql;
create table auth_password_credential (
private_id wt_private_id
primary key,
password_account_id wt_public_id
not null
unique,
password_conf_id wt_private_id,
password_method_id wt_public_id
not null,
foreign key (password_method_id, password_conf_id)
references auth_password_conf (password_method_id, private_id)
on delete cascade
on update cascade,
foreign key (password_method_id, password_account_id)
references auth_password_account (auth_method_id, public_id)
on delete cascade
on update cascade,
unique(password_method_id, password_conf_id, password_account_id)
);
-- insert_auth_password_credential_subtype() is a trigger function for
-- subtypes of auth_password_credential
create or replace function
insert_auth_password_credential_subtype()
returns trigger
as $$
begin
select auth_password_account.auth_method_id
into new.password_method_id
from auth_password_account
where auth_password_account.public_id = new.password_account_id;
insert into auth_password_credential
(private_id, password_account_id, password_conf_id, password_method_id)
values
(new.private_id, new.password_account_id, new.password_conf_id, new.password_method_id);
return new;
end;
$$ language plpgsql;
-- update_auth_password_credential_subtype() is an after update trigger
-- function for subtypes of auth_password_credential
create or replace function
update_auth_password_credential_subtype()
returns trigger
as $$
begin
/*
The configuration id of a credential is updated when a credential is
rehashed during authentication.
*/
if new.password_conf_id is distinct from old.password_conf_id then
update auth_password_credential
set password_conf_id = new.password_conf_id
where private_id = new.private_id;
end if;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- delete_auth_password_credential_subtype() is an after delete trigger
-- function for subtypes of auth_password_credential
create or replace function
delete_auth_password_credential_subtype()
returns trigger
as $$
begin
delete
from auth_password_credential
where private_id = old.private_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
--
-- triggers for time columns
--
create trigger
update_time_column
before
update on auth_password_method
for each row execute procedure update_time_column();
create trigger
immutable_columns
before
update on auth_password_method
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on auth_password_method
for each row execute procedure default_create_time();
create trigger
update_time_column
before
update on auth_password_account
for each row execute procedure update_time_column();
create trigger
immutable_columns
before
update on auth_password_account
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on auth_password_account
for each row execute procedure default_create_time();
-- The tickets for oplog are the subtypes not the base types because no updates
-- are done to any values in the base types.
insert into oplog_ticket
(name, version)
values
('auth_password_method', 1),
('auth_password_account', 1),
('auth_password_credential', 1);
`),
13: []byte(`
create table auth_password_argon2_conf (
private_id wt_private_id primary key
references auth_password_conf (private_id)
on delete cascade
on update cascade,
password_method_id wt_public_id not null,
create_time wt_timestamp,
iterations int not null default 3
constraint iterations_must_be_greater_than_0
check(iterations > 0),
memory int not null default 65536
constraint memory_must_be_greater_than_0
check(memory > 0),
threads int not null default 1
constraint threads_must_be_greater_than_0
check(threads > 0),
-- salt_length unit is bytes
salt_length int not null default 32
-- minimum of 16 bytes (128 bits)
constraint salt_must_be_at_least_16_bytes
check(salt_length >= 16),
-- key_length unit is bytes
key_length int not null default 32
-- minimum of 16 bytes (128 bits)
constraint key_length_must_be_at_least_16_bytes
check(key_length >= 16),
unique(password_method_id, iterations, memory, threads, salt_length, key_length),
unique (password_method_id, private_id),
foreign key (password_method_id, private_id)
references auth_password_conf (password_method_id, private_id)
on delete cascade
on update cascade
deferrable initially deferred
);
create or replace function
read_only_auth_password_argon2_conf()
returns trigger
as $$
begin
raise exception 'auth_password_argon2_conf is read-only';
end;
$$ language plpgsql;
create trigger
read_only_auth_password_argon2_conf
before
update on auth_password_argon2_conf
for each row execute procedure read_only_auth_password_argon2_conf();
create trigger
insert_auth_password_conf_subtype
before insert on auth_password_argon2_conf
for each row execute procedure insert_auth_password_conf_subtype();
create table auth_password_argon2_cred (
private_id wt_private_id primary key
references auth_password_credential (private_id)
on delete cascade
on update cascade,
password_account_id wt_public_id not null,
password_conf_id wt_private_id,
-- NOTE(mgaffney): The password_method_id type is not wt_public_id because
-- the domain check is executed before the insert trigger which retrieves
-- the password_method_id causing an insert to fail.
password_method_id text not null,
create_time wt_timestamp,
update_time wt_timestamp,
salt bytea not null -- cannot be changed unless derived_key is changed too
constraint salt_must_not_be_empty
check(length(salt) > 0),
derived_key bytea not null
constraint derived_key_must_not_be_empty
check(length(derived_key) > 0),
-- TODO: Make key_id a foreign key once we have DEKs
key_id text not null
constraint key_id_must_not_be_empty
check(length(trim(key_id)) > 0),
foreign key (password_method_id, password_conf_id)
references auth_password_argon2_conf (password_method_id, private_id)
on delete cascade
on update cascade,
foreign key (password_method_id, password_conf_id, password_account_id)
references auth_password_credential (password_method_id, password_conf_id, password_account_id)
on delete cascade
on update cascade
deferrable initially deferred
);
create trigger
insert_auth_password_credential_subtype
before insert on auth_password_argon2_cred
for each row execute procedure insert_auth_password_credential_subtype();
create trigger
update_auth_password_credential_subtype
after update on auth_password_argon2_cred
for each row execute procedure update_auth_password_credential_subtype();
create trigger
delete_auth_password_credential_subtype
after delete on auth_password_argon2_cred
for each row execute procedure delete_auth_password_credential_subtype();
--
-- triggers for time columns
--
create trigger
immutable_columns
before
update on auth_password_argon2_conf
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on auth_password_argon2_conf
for each row execute procedure default_create_time();
create trigger
update_time_column
before
update on auth_password_argon2_cred
for each row execute procedure update_time_column();
create trigger
immutable_columns
before
update on auth_password_argon2_cred
for each row execute procedure immutable_columns('create_time');
create trigger
default_create_time_column
before
insert on auth_password_argon2_cred
for each row execute procedure default_create_time();
-- The tickets for oplog are the subtypes not the base types because no updates
-- are done to any values in the base types.
insert into oplog_ticket
(name, version)
values
('auth_password_argon2_conf', 1),
('auth_password_argon2_cred', 1);
`),
14: []byte(`
-- auth_password_conf_union is a union of the configuration settings
-- of all supported key derivation functions.
-- It will be updated as new key derivation functions are supported.
create or replace view auth_password_conf_union as
-- Do not change the order of the columns when adding new configurations.
-- Union with new tables appending new columns as needed.
select c.password_method_id, c.private_id as password_conf_id, c.private_id,
'argon2' as conf_type,
c.iterations, c.memory, c.threads, c.salt_length, c.key_length
from auth_password_argon2_conf c;
-- auth_password_current_conf provides a view of the current password
-- configuration for each password auth method.
-- The view will be updated as new key derivation functions are supported
-- but the query to create the view should not need to be updated.
create or replace view auth_password_current_conf as
-- Rerun this query whenever auth_password_conf_union is updated.
select pm.min_login_name_length, pm.min_password_length, c.*
from auth_password_method pm
inner join auth_password_conf_union c
on pm.password_conf_id = c.password_conf_id;
`),
20: []byte(`
/*
┌─────────────────┐
│ host │
├─────────────────┤
│ public_id (pk) │
│ catalog_id (fk) │
│ │
└─────────────────┘
╲│╱
┌─────────────────┐ ┌─────────────────┐
│ iam_scope │ │ host_catalog │
├─────────────────┤ ├─────────────────┤
│ public_id (pk) │ ╱│ public_id (pk) │
│ │┼┼──────○─│ scope_id (fk) │
│ │ ╲│ │
└─────────────────┘ └─────────────────┘
╱│╲
┌─────────────────┐
│ host_set │
├─────────────────┤
│ public_id (pk) │
│ catalog_id (fk) │
│ │
└─────────────────┘
*/
-- host_catalog
create table host_catalog (
public_id wt_public_id
primary key,
scope_id wt_scope_id
not null
references iam_scope (public_id)
on delete cascade
on update cascade,
-- The order of columns is important for performance. See:
-- https://dba.stackexchange.com/questions/58970/enforcing-constraints-two-tables-away/58972#58972
-- https://dba.stackexchange.com/questions/27481/is-a-composite-index-also-good-for-queries-on-the-first-field
unique(scope_id, public_id)
);
create trigger immutable_columns before update on host_catalog
for each row execute procedure immutable_columns('public_id', 'scope_id');
-- insert_host_catalog_subtype() is a before insert trigger
-- function for subtypes of host_catalog
create or replace function insert_host_catalog_subtype()
returns trigger
as $$
begin
insert into host_catalog
(public_id, scope_id)
values
(new.public_id, new.scope_id);
return new;
end;
$$ language plpgsql;
-- delete_host_catalog_subtype() is an after delete trigger
-- function for subtypes of host_catalog
create or replace function delete_host_catalog_subtype()
returns trigger
as $$
begin
delete from host_catalog
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- host
create table host (
public_id wt_public_id primary key,
catalog_id wt_public_id not null
references host_catalog (public_id)
on delete cascade
on update cascade,
unique(catalog_id, public_id)
);
create trigger immutable_columns before update on host
for each row execute procedure immutable_columns('public_id', 'catalog_id');
-- insert_host_subtype() is a before insert trigger
-- function for subtypes of host
create or replace function insert_host_subtype()
returns trigger
as $$
begin
insert into host
(public_id, catalog_id)
values
(new.public_id, new.catalog_id);
return new;
end;
$$ language plpgsql;
-- delete_host_subtype() is an after delete trigger
-- function for subtypes of host
create or replace function delete_host_subtype()
returns trigger
as $$
begin
delete from host
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- host_set
create table host_set (
public_id wt_public_id primary key,
catalog_id wt_public_id not null
references host_catalog (public_id)
on delete cascade
on update cascade,
unique(catalog_id, public_id)
);
create trigger immutable_columns before update on host_set
for each row execute procedure immutable_columns('public_id', 'catalog_id');
-- insert_host_set_subtype() is a before insert trigger
-- function for subtypes of host_set
create or replace function insert_host_set_subtype()
returns trigger
as $$
begin
insert into host_set
(public_id, catalog_id)
values
(new.public_id, new.catalog_id);
return new;
end;
$$ language plpgsql;
-- delete_host_set_subtype() is an after delete trigger
-- function for subtypes of host_set
create or replace function delete_host_set_subtype()
returns trigger
as $$
begin
delete from host_set
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
insert into oplog_ticket (name, version)
values
('host_catalog', 1),
('host', 1),
('host_set', 1);
`),
22: []byte(`
/*
┌─────────────────┐ ┌─────────────────────┐
│ host │ │ static_host │
├─────────────────┤ ├─────────────────────┤
│ public_id (pk) │ │ public_id (pk) │
│ catalog_id (fk) │┼┼──────○┼│ catalog_id (fk) │┼┼─────────────────────┐
│ │ │ address │ ◀fk1 │
└─────────────────┘ └─────────────────────┘ │
╲│╱ ╲│╱ │
○ ○ │
│ │ │
┼ ┼ ○
┼ ┼ ╱│╲
┌─────────────────┐ ┌─────────────────────┐ ┌────────────────────────┐
│ host_catalog │ │ static_host_catalog │ │ static_host_set_member │
├─────────────────┤ ├─────────────────────┤ ├────────────────────────┤
│ public_id (pk) │ │ public_id (pk) │ │ host_id (pk,fk1) │
│ scope_id (fk) │┼┼──────○┼│ scope_id (fk) │ │ set_id (pk,fk2) │
│ │ │ │ │ catalog_id (fk1,fk2) │
└─────────────────┘ └─────────────────────┘ └────────────────────────┘
┼ ┼ ╲│╱
┼ ┼ ○
│ │ │
○ ○ │
╱│╲ ╱│╲ │
┌─────────────────┐ ┌─────────────────────┐ │
│ host_set │ │ static_host_set │ │
├─────────────────┤ ├─────────────────────┤ │
│ public_id (pk) │ │ public_id (pk) │ ◀fk2 │
│ catalog_id (fk) │┼┼──────○┼│ catalog_id (fk) │┼┼─────────────────────┘
│ │ │ │
└─────────────────┘ └─────────────────────┘
*/
create table static_host_catalog (
public_id wt_public_id
primary key,
scope_id wt_scope_id
not null
references iam_scope (public_id)
on delete cascade
on update cascade,
name text,
description text,
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
foreign key (scope_id, public_id)
references host_catalog (scope_id, public_id)
on delete cascade
on update cascade,
unique(scope_id, name)
);
create trigger update_version_column after update on static_host_catalog
for each row execute procedure update_version_column();
create trigger update_time_column before update on static_host_catalog
for each row execute procedure update_time_column();
create trigger default_create_time_column before insert on static_host_catalog
for each row execute procedure default_create_time();
create trigger immutable_columns before update on static_host_catalog
for each row execute procedure immutable_columns('public_id', 'scope_id','create_time');
create trigger insert_host_catalog_subtype before insert on static_host_catalog
for each row execute procedure insert_host_catalog_subtype();
create trigger delete_host_catalog_subtype after delete on static_host_catalog
for each row execute procedure delete_host_catalog_subtype();
create table static_host (
public_id wt_public_id primary key,
catalog_id wt_public_id not null
references static_host_catalog (public_id)
on delete cascade
on update cascade,
name text,
description text,
address text not null
constraint address_must_be_more_than_2_characters
check(length(trim(address)) > 2)
constraint address_must_be_less_than_256_characters
check(length(trim(address)) < 256),
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
unique(catalog_id, name),
foreign key (catalog_id, public_id)
references host (catalog_id, public_id)
on delete cascade
on update cascade,
-- The order of columns is important for performance. See:
-- https://dba.stackexchange.com/questions/58970/enforcing-constraints-two-tables-away/58972#58972
-- https://dba.stackexchange.com/questions/27481/is-a-composite-index-also-good-for-queries-on-the-first-field
unique(catalog_id, public_id)
);
create trigger update_version_column after update on static_host
for each row execute procedure update_version_column();
create trigger update_time_column before update on static_host
for each row execute procedure update_time_column();
create trigger default_create_time_column before insert on static_host
for each row execute procedure default_create_time();
create trigger immutable_columns before update on static_host
for each row execute procedure immutable_columns('public_id', 'catalog_id','create_time');
create trigger insert_host_subtype before insert on static_host
for each row execute procedure insert_host_subtype();
create trigger delete_host_subtype after delete on static_host
for each row execute procedure delete_host_subtype();
create table static_host_set (
public_id wt_public_id primary key,
catalog_id wt_public_id not null
references static_host_catalog (public_id)
on delete cascade
on update cascade,
name text,
description text,
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
unique(catalog_id, name),
foreign key (catalog_id, public_id)
references host_set (catalog_id, public_id)
on delete cascade
on update cascade,
unique(catalog_id, public_id)
);
create trigger update_version_column after update on static_host_set
for each row execute procedure update_version_column();
create trigger update_time_column before update on static_host_set
for each row execute procedure update_time_column();
create trigger default_create_time_column before insert on static_host_set
for each row execute procedure default_create_time();
create trigger immutable_columns before update on static_host_set
for each row execute procedure immutable_columns('public_id', 'catalog_id','create_time');
create trigger insert_host_set_subtype before insert on static_host_set
for each row execute procedure insert_host_set_subtype();
create trigger delete_host_set_subtype after delete on static_host_set
for each row execute procedure delete_host_set_subtype();
create table static_host_set_member (
host_id wt_public_id not null,
set_id wt_public_id not null,
catalog_id wt_public_id not null,
primary key(host_id, set_id),
foreign key (catalog_id, host_id) -- fk1
references static_host (catalog_id, public_id)
on delete cascade
on update cascade,
foreign key (catalog_id, set_id) -- fk2
references static_host_set (catalog_id, public_id)
on delete cascade
on update cascade
);
create trigger immutable_columns before update on static_host_set_member
for each row execute procedure immutable_columns('host_id', 'set_id', 'catalog_id');
create or replace function insert_static_host_set_member()
returns trigger
as $$
begin
select static_host_set.catalog_id
into new.catalog_id
from static_host_set
where static_host_set.public_id = new.set_id;
return new;
end;
$$ language plpgsql;
create trigger insert_static_host_set_member before insert on static_host_set_member
for each row execute procedure insert_static_host_set_member();
insert into oplog_ticket (name, version)
values
('static_host_catalog', 1),
('static_host', 1),
('static_host_set', 1),
('static_host_set_member', 1);
`),
30: []byte(`
-- kms_version_column() will increment the version column whenever row data
-- is inserted and should only be used in a before insert trigger. This
-- function will overwrite any explicit values to the version column.
create or replace function
kms_version_column()
returns trigger
as $$
declare
_key_id text;
_max bigint;
begin
execute format('SELECT $1.%I', tg_argv[0]) into _key_id using new;
execute format('select max(version) + 1 from %I where %I = $1', tg_relid::regclass, tg_argv[0]) using _key_id into _max;
if _max is null then
_max = 1;
end if;
new.version = _max;
return new;
end;
$$ language plpgsql;
comment on function
kms_version_column()
is
'function used in before insert triggers to properly set version columns for kms_* tables with a version column';
`),
31: []byte(`
/*
┌────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
├────────────────────────────────────────────────────────────────────────────────────────────┐ ○
├────────────────────────────────────────────────────────────────┐ ○ ┼
├────────────────────────────────────┐ ○ ┼ ┌────────────────────────┐
│ ○ ┼ ┌────────────────────────┐ │ kms_token_key │
┼ ┼ ┌────────────────────────┐ │ kms_session_key │ ├────────────────────────┤
┌────────────────────────┐ ┌────────────────────────┐ │ kms_oplog_key │ ├────────────────────────┤ │private_id │
│ kms_root_key │ │ kms_database_key │ ├────────────────────────┤ │private_id │ │root_key_id │
├────────────────────────┤ ├────────────────────────┤ │private_id │ │root_key_id │ │ │
│private_id │ │private_id │ │root_key_id │ │ │ │ │
│scope_id │ │root_key_id │ │ │ │ │ │ │
│ │ │ │ │ │ │ │ └────────────────────────┘
└────────────────────────┘ └────────────────────────┘ └────────────────────────┘ └────────────────────────┘ ┼
┼ ┼ ┼ ┼ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
│ │ │ │ │
┼ ┼ ┼ ┼ │
╱│╲ ╱│╲ ╱│╲ ╱│╲ ┼
┌────────────────────────┐ ┌────────────────────────┐ ┌────────────────────────┐ ┌────────────────────────┐ ╱│╲
│ kms_root_key_version │ │kms_database_key_version│ │ kms_oplog_key_version │ │kms_session_key_version │ ┌────────────────────────┐
├────────────────────────┤ ├────────────────────────┤ ├────────────────────────┤ ├────────────────────────┤ │ kms_token_key_version │
│private_id │ │private_id │ │private_id │ │private_id │ ├────────────────────────┤
│root_key_id │ │database_key_id │ │oplog_key_id │ │session_key_id │ │private_id │
│key │ │root_key_id │ │root_key_id │ │root_key_id │ │token_key_id │
│version │ │key │ │key │ │key │ │root_key_id │
│ │ │version │ │version │ │version │ │key │
└────────────────────────┘ └────────────────────────┘ │ │ │ │ │version │
┼ ┼ └────────────────────────┘ │ │ │ │
│ ○ ┼ └────────────────────────┘ │ │
├────────────────────────────────────┘ ○ ┼ └────────────────────────┘
├────────────────────────────────────────────────────────────────┘ ○ ┼
├────────────────────────────────────────────────────────────────────────────────────────────┘ ○
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
*/
create table kms_root_key (
private_id wt_private_id primary key,
scope_id wt_scope_id not null unique -- there can only be one root key for a scope.
references iam_scope(public_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
-- define the immutable fields for kms_root_key (all of them)
create trigger
immutable_columns
before
update on kms_root_key
for each row execute procedure immutable_columns('private_id', 'scope_id', 'create_time');
create trigger
default_create_time_column
before
insert on kms_root_key
for each row execute procedure default_create_time();
create table kms_root_key_version (
private_id wt_private_id primary key,
root_key_id wt_private_id not null
references kms_root_key(private_id)
on delete cascade
on update cascade,
version wt_version,
key bytea not null,
create_time wt_timestamp,
unique(root_key_id, version)
);
-- define the immutable fields for kms_root_key_version (all of them)
create trigger
immutable_columns
before
update on kms_root_key_version
for each row execute procedure immutable_columns('private_id', 'root_key_id', 'version', 'key', 'create_time');
create trigger
default_create_time_column
before
insert on kms_root_key_version
for each row execute procedure default_create_time();
create trigger
kms_version_column
before insert on kms_root_key_version
for each row execute procedure kms_version_column('root_key_id');
create table kms_database_key (
private_id wt_private_id primary key,
root_key_id wt_private_id not null unique -- there can be only one database dek per root key
references kms_root_key(private_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
-- define the immutable fields for kms_database_key (all of them)
create trigger
immutable_columns
before
update on kms_database_key
for each row execute procedure immutable_columns('private_id', 'root_key_id', 'create_time');
create trigger
default_create_time_column
before
insert on kms_database_key
for each row execute procedure default_create_time();
create table kms_database_key_version (
private_id wt_private_id primary key,
database_key_id wt_private_id not null
references kms_database_key(private_id)
on delete cascade
on update cascade,
root_key_version_id wt_private_id not null
references kms_root_key_version(private_id)
on delete cascade
on update cascade,
version wt_version,
key bytea not null,
create_time wt_timestamp,
unique(database_key_id, version)
);
-- define the immutable fields for kms_database_key_version (all of them)
create trigger
immutable_columns
before
update on kms_database_key_version
for each row execute procedure immutable_columns('private_id', 'database_key_id', 'root_key_version_id', 'version', 'key', 'create_time');
create trigger
default_create_time_column
before
insert on kms_database_key_version
for each row execute procedure default_create_time();
create trigger
kms_version_column
before insert on kms_database_key_version
for each row execute procedure kms_version_column('database_key_id');
create table kms_oplog_key (
private_id wt_private_id primary key,
root_key_id wt_private_id not null unique -- there can be only one oplog dek per root key
references kms_root_key(private_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
-- define the immutable fields for kms_oplog_key (all of them)
create trigger
immutable_columns
before
update on kms_oplog_key
for each row execute procedure immutable_columns('private_id', 'root_key_id', 'create_time');
create trigger
default_create_time_column
before
insert on kms_oplog_key
for each row execute procedure default_create_time();
create table kms_oplog_key_version (
private_id wt_private_id primary key,
oplog_key_id wt_private_id not null
references kms_oplog_key(private_id)
on delete cascade
on update cascade,
root_key_version_id wt_private_id not null
references kms_root_key_version(private_id)
on delete cascade
on update cascade,
version wt_version,
key bytea not null,
create_time wt_timestamp,
unique(oplog_key_id, version)
);
-- define the immutable fields for kms_oplog_key_version (all of them)
create trigger
immutable_columns
before
update on kms_oplog_key_version
for each row execute procedure immutable_columns('private_id', 'oplog_key_id', 'root_key_version_id', 'version', 'key', 'create_time');
create trigger
default_create_time_column
before
insert on kms_oplog_key_version
for each row execute procedure default_create_time();
create trigger
kms_version_column
before insert on kms_oplog_key_version
for each row execute procedure kms_version_column('oplog_key_id');
create table kms_session_key (
private_id wt_private_id primary key,
root_key_id wt_private_id not null unique -- there can be only one session dek per root key
references kms_root_key(private_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
-- define the immutable fields for kms_session_key (all of them)
create trigger
immutable_columns
before
update on kms_session_key
for each row execute procedure immutable_columns('private_id', 'root_key_id', 'create_time');
create trigger
default_create_time_column
before
insert on kms_session_key
for each row execute procedure default_create_time();
create table kms_session_key_version (
private_id wt_private_id primary key,
session_key_id wt_private_id not null
references kms_session_key(private_id)
on delete cascade
on update cascade,
root_key_version_id wt_private_id not null
references kms_root_key_version(private_id)
on delete cascade
on update cascade,
version wt_version,
key bytea not null,
create_time wt_timestamp,
unique(session_key_id, version)
);
-- define the immutable fields for kms_session_key_version (all of them)
create trigger
immutable_columns
before
update on kms_session_key_version
for each row execute procedure immutable_columns('private_id', 'session_key_id', 'root_key_version_id', 'version', 'key', 'create_time');
create trigger
default_create_time_column
before
insert on kms_session_key_version
for each row execute procedure default_create_time();
create trigger
kms_version_column
before insert on kms_session_key_version
for each row execute procedure kms_version_column('session_key_id');
create table kms_token_key (
private_id wt_private_id primary key,
root_key_id wt_private_id not null unique -- there can be only one token dek per root key
references kms_root_key(private_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
-- define the immutable fields for kms_token_key (all of them)
create trigger
immutable_columns
before
update on kms_token_key
for each row execute procedure immutable_columns('private_id', 'root_key_id', 'create_time');
create trigger
default_create_time_column
before
insert on kms_token_key
for each row execute procedure default_create_time();
create table kms_token_key_version (
private_id wt_private_id primary key,
token_key_id wt_private_id not null
references kms_token_key(private_id)
on delete cascade
on update cascade,
root_key_version_id wt_private_id not null
references kms_root_key_version(private_id)
on delete cascade
on update cascade,
version wt_version,
key bytea not null,
create_time wt_timestamp,
unique(token_key_id, version)
);
-- define the immutable fields for kms_token_key_version (all of them)
create trigger
immutable_columns
before
update on kms_token_key_version
for each row execute procedure immutable_columns('private_id', 'token_key_id', 'root_key_version_id', 'version', 'key', 'create_time');
create trigger
default_create_time_column
before
insert on kms_token_key_version
for each row execute procedure default_create_time();
create trigger
kms_version_column
before insert on kms_token_key_version
for each row execute procedure kms_version_column('token_key_id');
insert into oplog_ticket
(name, version)
values
('kms_root_key', 1),
('kms_root_key_version', 1);
`),
40: []byte(`
-- insert_target_subtype() is a before insert trigger
-- function for subtypes of target
create or replace function
insert_target_subtype()
returns trigger
as $$
begin
insert into target
(public_id, scope_id)
values
(new.public_id, new.scope_id);
return new;
end;
$$ language plpgsql;
-- delete_target_subtype() is an after delete trigger
-- function for subtypes of target
create or replace function delete_target_subtype()
returns trigger
as $$
begin
delete from target
where
public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- target_scope_valid() is a before insert trigger function for target
create or replace function
target_scope_valid()
returns trigger
as $$
declare scope_type text;
begin
-- Fetch the type of scope
select isc.type from iam_scope isc where isc.public_id = new.scope_id into scope_type;
if scope_type = 'project' then
return new;
end if;
raise exception 'invalid target scope type % (must be project)', scope_type;
end;
$$ language plpgsql;
-- target_host_set_scope_valid() is a before insert trigger function for target_host_set
create or replace function
target_host_set_scope_valid()
returns trigger
as $$
begin
perform from
host_catalog hc,
host_set hs,
target t,
iam_scope s
where
hc.public_id = hs.catalog_id and
hc.scope_id = t.scope_id and
t.public_id = new.target_id;
if not found then
raise exception 'target scope and host set scope are not equal';
end if;
return new;
end;
$$ language plpgsql;
`),
41: []byte(`
/*
┌─────────────────┐
┌─────────────────┐ │ target_tcp │
│ target │ ├─────────────────┤
├─────────────────┤ │public_id │
│public_id │┼─────────────○┼│scope_id │
│scope_id │ │default_port │
│ │ │name (not null) │
└─────────────────┘ │description │
┼ └─────────────────┘
╱│╲
┌─────────────────┐
│ target_host_set │
├─────────────────┤
│target_id │
│host_set_id │
│ │
└─────────────────┘
╲│╱
┌─────────────────┐
│ host_set │
├─────────────────┤
│public_id │
│catalog_id │
│ │
└─────────────────┘
*/
create table target (
public_id wt_public_id primary key,
scope_id wt_scope_id not null
references iam_scope(public_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
create trigger
immutable_columns
before
update on target
for each row execute procedure immutable_columns('public_id', 'scope_id', 'create_time');
create trigger
default_create_time_column
before
insert on target
for each row execute procedure default_create_time();
create trigger
target_scope_valid
before insert on target
for each row execute procedure target_scope_valid();
create table target_host_set(
target_id wt_public_id
references target(public_id)
on delete cascade
on update cascade,
host_set_id wt_public_id
references host_set(public_id)
on delete cascade
on update cascade,
primary key(target_id, host_set_id),
create_time wt_timestamp
);
create trigger
immutable_columns
before
update on target_host_set
for each row execute procedure immutable_columns('target_id', 'host_set_id', 'create_time');
create trigger
target_host_set_scope_valid
before
insert on target_host_set
for each row execute procedure target_host_set_scope_valid();
create table target_tcp (
public_id wt_public_id primary key
references target(public_id)
on delete cascade
on update cascade,
scope_id wt_scope_id not null
references iam_scope(public_id)
on delete cascade
on update cascade,
name text not null, -- name is not optional for a target subtype
description text,
default_port int, -- default_port can be null
-- max duration of the session in seconds.
-- default is 8 hours
session_max_seconds int not null default 28800
constraint session_max_seconds_must_be_greater_than_0
check(session_max_seconds > 0),
-- limit on number of session connections allowed. -1 equals no limit
session_connection_limit int not null default 1
constraint session_connection_limit_must_be_greater_than_0_or_negative_1
check(session_connection_limit > 0 or session_connection_limit = -1),
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
unique(scope_id, name) -- name must be unique within a scope
);
create trigger
insert_target_subtype
before insert on target_tcp
for each row execute procedure insert_target_subtype();
create trigger
delete_target_subtype
after delete on target_tcp
for each row execute procedure delete_target_subtype();
-- define the immutable fields for target
create trigger
immutable_columns
before
update on target_tcp
for each row execute procedure immutable_columns('public_id', 'scope_id', 'create_time');
create trigger
update_version_column
after update on target_tcp
for each row execute procedure update_version_column();
create trigger
update_time_column
before update on target_tcp
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on target_tcp
for each row execute procedure default_create_time();
create trigger
target_scope_valid
before insert on target_tcp
for each row execute procedure target_scope_valid();
-- target_all_subtypes is a union of all target subtypes
-- NOTE: this is replaced in 100 to add worker_filter
create view target_all_subtypes
as
select
public_id,
scope_id,
name,
description,
default_port,
session_max_seconds,
session_connection_limit,
version,
create_time,
update_time,
'tcp' as type
from target_tcp;
create view target_set
as
select
hs.public_id,
hs.catalog_id,
ths.target_id
from
target_host_set ths,
host_set hs
where
hs.public_id = ths.host_set_id;
insert into oplog_ticket
(name, version)
values
('target_tcp', 1);
`),
50: []byte(`
/*
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ iam_scope_org │ │ iam_user │ │ auth_token │
├─────────────────┤ ├─────────────────┤ ├─────────────────┤
│ public_id (pk) │ │ public_id (pk) │ │ public_id (pk) │
│ │ │ │ │ │
└─────────────────┘ └─────────────────┘ └─────────────────┘
▲fk7 ┼ ▲fk1 ┼ ▲fk6 ┼
┼ ┼ ┼
├─────────────────────────────────┴─────────────────────┘
╱│╲
┌──────────────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ session │╲ fk3▶ │ server │ │ target │
├──────────────────────────┤─○──────○┼├─────────────────┤ ├─────────────────┤
│ public_id (pk) │╱ │ private_id (pk) │ │ public_id (pk) │
│ user_id (fk1) │ │ type (pk) │ │ │
│ host_id (fk2) │ └─────────────────┘ └─────────────────┘
│ server_id (fk3) │ ▲fk4 ┼
│ server_type (fk3) │╲ ┼
│ target_id (fk4) │─○─────────────────┬─────────────────────┤
│ host_set_id (fk5) │╱ ┼ ┼
│ auth_token_id (fk6) │ ▼fk5 ┼ ▼fk2 ┼
│ scope_id (fk7) │ ┌─────────────────┐ ┌─────────────────┐
│ termination_reason (fk8) │ │ host_set │ │ host │
└──────────────────────────┘ ├─────────────────┤ ├─────────────────┤
▲fk1 ┼ ╲│╱ │ public_id (pk) │ │ public_id (pk) │
┼ ○ │ │ │ │
│ │ └─────────────────┘ └─────────────────┘
│ │
└─┐ │
│ │ ┌───────────────────────────────┐
│ │ │session_termination_reason_enm │
│ │ fk8▶ ├───────────────────────────────┤
┼ └──────────○┼│ name │
╱│╲ └───────────────────────────────┘
┌──────────────────────────────────────────┐
│ session_state │
├──────────────────────────────────────────┤┼○┐
│ session_id (pk,fk1,fk2,unq1,unq2) │ │◀fk2
│ state (fk3) │ │
│ previous_end_time (fk2,unq1) │┼○┘
│ start_time (pk) │
│ end_time (unq2) │
└──────────────────────────────────────────┘
╲│╱
▼fk3 ┼
┌───────────────────────────────┐
│ session_state_enm │
├───────────────────────────────┤
│ name │
└───────────────────────────────┘
*/
create table session_termination_reason_enm (
name text primary key
constraint only_predefined_session_termination_reasons_allowed
check (
name in (
'unknown',
'timed out',
'closed by end-user',
'terminated',
'network error',
'system error',
'connection limit',
'canceled'
)
)
);
insert into session_termination_reason_enm (name)
values
('unknown'),
('timed out'),
('closed by end-user'),
('terminated'),
('network error'),
('system error'),
('connection limit'),
('canceled');
-- Note: here, and in the session_connection table, we should add a trigger
-- ensuring that if server_id goes to null, we mark connections as closed. See
-- https://hashicorp.atlassian.net/browse/ICU-1495
create table session (
public_id wt_public_id primary key,
-- the user of the session
user_id text -- fk1
-- not using the wt_user_id domain type because it is marked 'not null'
references iam_user (public_id)
on delete set null
on update cascade,
-- the host the user is connected to via this session
host_id wt_public_id -- fk2
references host (public_id)
on delete set null
on update cascade,
-- the worker proxying the connection between the user and the host
server_id text, -- fk3
server_type text,-- fk3
foreign key (server_id, server_type)
references server (private_id, type)
on delete set null
on update cascade,
-- the target the host was chosen from and the user was authorized to
-- connect to
target_id wt_public_id -- fk4
references target (public_id)
on delete set null
on update cascade,
-- the host set the host was chosen from and the user was authorized to
-- connect to via the target
host_set_id wt_public_id -- fk5
references host_set (public_id)
on delete set null
on update cascade,
-- the auth token of the user when this session was created
auth_token_id wt_public_id -- fk6
references auth_token (public_id)
on delete set null
on update cascade,
-- the project which owns this session
scope_id wt_scope_id -- fk7
references iam_scope_project (scope_id)
on delete set null
on update cascade,
-- Certificate to use when connecting (or if using custom certs, to
-- serve as the "login"). Raw DER bytes.
certificate bytea not null,
-- after this time the connection will be expired, e.g. forcefully terminated
expiration_time wt_timestamp, -- maybe null
-- limit on number of session connections allowed. -1 equals no limit
connection_limit int not null default 1
constraint connection_limit_must_be_greater_than_0_or_negative_1
check(connection_limit > 0 or connection_limit = -1),
-- trust of first use token
tofu_token bytea, -- will be null when session is first created
-- the reason this session ended (null until terminated)
-- TODO: Make key_id a foreign key once we have DEKs
key_id text, -- will be null on insert
-- references kms_database_key_version(private_id)
-- on delete restrict
-- on update cascade,
termination_reason text -- fk8
references session_termination_reason_enm (name)
on delete restrict
on update cascade,
version wt_version,
create_time wt_timestamp,
update_time wt_timestamp,
endpoint text -- not part of the warehouse, used to send info to the worker
);
-- Replaced in 100 to add worker_filter
create trigger
immutable_columns
before
update on session
for each row execute procedure immutable_columns('public_id', 'certificate', 'expiration_time', 'connection_limit', 'create_time', 'endpoint');
-- session table has some cascades of FK to null, so we need to be careful
-- which columns trigger an update of the version column
create trigger
update_version_column
after update of version, termination_reason, key_id, tofu_token, server_id, server_type on session
for each row execute procedure update_version_column();
create trigger
update_time_column
before update on session
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on session
for each row execute procedure default_create_time();
create or replace function
insert_session()
returns trigger
as $$
begin
case
when new.user_id is null then
raise exception 'user_id is null';
when new.host_id is null then
raise exception 'host_id is null';
when new.target_id is null then
raise exception 'target_id is null';
when new.host_set_id is null then
raise exception 'host_set_id is null';
when new.auth_token_id is null then
raise exception 'auth_token_id is null';
when new.scope_id is null then
raise exception 'scope_id is null';
when new.endpoint is null then
raise exception 'endpoint is null';
else
end case;
return new;
end;
$$ language plpgsql;
create trigger
insert_session
before insert on session
for each row execute procedure insert_session();
create or replace function
insert_new_session_state()
returns trigger
as $$
begin
insert into session_state (session_id, state)
values
(new.public_id, 'pending');
return new;
end;
$$ language plpgsql;
create trigger
insert_new_session_state
after insert on session
for each row execute procedure insert_new_session_state();
-- update_session_state_on_termination_reason() is used in an after update
-- trigger on the session table. when termination_reason is set, it will
-- validate that all of the session's connections are closed, and then insert
-- a state of "terminated" in session_state for the terminated session.
create or replace function
update_session_state_on_termination_reason()
returns trigger
as $$
begin
if new.termination_reason is not null then
perform from
session
where
public_id = new.public_id and
public_id not in (
select session_id
from session_connection
where
public_id in (
select connection_id
from session_connection_state
where
state != 'closed' and
end_time is null
)
);
if not found then
raise 'session %s has open connections', new.public_id;
end if;
-- check to see if there's a terminated state already, before inserting a
-- new one.
perform from
session_state ss
where
ss.session_id = new.public_id and
ss.state = 'terminated';
if found then
return new;
end if;
insert into session_state (session_id, state)
values
(new.public_id, 'terminated');
end if;
return new;
end;
$$ language plpgsql;
create trigger
update_session_state_on_termination_reason
after update of termination_reason on session
for each row execute procedure update_session_state_on_termination_reason();
-- cancel_session will insert a 'canceling' state for the session, if there
-- isn't a 'canceling' state already. It's used by cancel_session_with_null_fk.
create or replace function
cancel_session(in sessionId text) returns void
as $$
declare
rows_affected numeric;
begin
insert into session_state(session_id, state)
select
sessionId::text, 'canceling'
from
session s
where
s.public_id = sessionId::text and
s.public_id not in (
select
session_id
from
session_state
where
session_id = sessionId::text and
state = 'canceling'
) limit 1;
get diagnostics rows_affected = row_count;
if rows_affected > 1 then
raise exception 'cancel session: more than one row affected: %', rows_affected;
end if;
end;
$$ language plpgsql;
-- cancel_session_with_null_fk is intended to be a before update trigger that
-- sets the session's state to cancel if a FK is set to null.
create or replace function
cancel_session_with_null_fk()
returns trigger
as $$
begin
case
when new.user_id is null then
perform cancel_session(new.public_id);
when new.host_id is null then
perform cancel_session(new.public_id);
when new.target_id is null then
perform cancel_session(new.public_id);
when new.host_set_id is null then
perform cancel_session(new.public_id);
when new.auth_token_id is null then
perform cancel_session(new.public_id);
when new.scope_id is null then
perform cancel_session(new.public_id);
end case;
return new;
end;
$$ language plpgsql;
create trigger
cancel_session_with_null_fk
before update of user_id, host_id, target_id, host_set_id, auth_token_id, scope_id on session
for each row execute procedure cancel_session_with_null_fk();
create table session_state_enm (
name text primary key
constraint only_predefined_session_states_allowed
check (
name in ('pending', 'active', 'canceling', 'terminated')
)
);
insert into session_state_enm (name)
values
('pending'),
('active'),
('canceling'),
('terminated');
/*
┌────────────────┐
start │ │
. │ Canceling │
(●) ┌────▶│ │─────┐
' │ │ │ │
│ │ └────────────────┘ │
│ │ │
▼ │ ▼
┌────────────────┐ ┌────────────────┐ ┌────────────────┐
│ │ │ │ │ │
│ Pending │ │ Active │ │ Terminated │
│ │──────────▶│ │──────────▶│ │
│ │ │ │ │ │
└────────────────┘ └────────────────┘ └────────────────┘
│ │
│ │
│ │
│ │
└──────────────────────▶ ◉ End ◀────────────────────────┘
*/
-- Design influenced by:
-- Joe Celko's SQL for Smarties: Advanced SQL Programming, 5th edition
-- Chapter 12, p270
create table session_state (
session_id wt_public_id not null -- fk1, fk2
references session (public_id)
on delete cascade
on update cascade,
state text not null -- fk3
references session_state_enm(name)
on delete restrict
on update cascade,
previous_end_time timestamp with time zone, -- fk2 -- null means first state
start_time timestamp with time zone default current_timestamp not null,
constraint previous_end_time_and_start_time_in_sequence
check (previous_end_time <= start_time),
end_time timestamp with time zone, -- null means unfinished current state
constraint start_and_end_times_in_sequence
check (start_time <= end_time),
constraint end_times_in_sequence
check (previous_end_time <> end_time),
primary key (session_id, start_time),
unique (session_id, previous_end_time), -- null means first state
unique (session_id, end_time), -- one null current state
foreign key (session_id, previous_end_time) -- self-reference
references session_state (session_id, end_time)
);
create trigger
immutable_columns
before
update on session_state
for each row execute procedure immutable_columns('session_id', 'state', 'start_time', 'previous_end_time');
create or replace function
insert_session_state()
returns trigger
as $$
begin
update session_state
set end_time = now()
where session_id = new.session_id
and end_time is null;
if not found then
new.previous_end_time = null;
new.start_time = now();
new.end_time = null;
return new;
end if;
new.previous_end_time = now();
new.start_time = now();
new.end_time = null;
return new;
end;
$$ language plpgsql;
create trigger insert_session_state before insert on session_state
for each row execute procedure insert_session_state();
-- Replaced in 100 to add worker_filter
create view session_with_state as
select
s.public_id,
s.user_id,
s.host_id,
s.server_id,
s.server_type,
s.target_id,
s.host_set_id,
s.auth_token_id,
s.scope_id,
s.certificate,
s.expiration_time,
s.connection_limit,
s.tofu_token,
s.key_id,
s.termination_reason,
s.version,
s.create_time,
s.update_time,
s.endpoint,
ss.state,
ss.previous_end_time,
ss.start_time,
ss.end_time
from
session s,
session_state ss
where
s.public_id = ss.session_id;
`),
51: []byte(`
/*
┌────────────────┐
│ session │
├────────────────┤
│ public_id (pk) │
│ │
│ │
└────────────────┘
▲fk1 ┼
╱│╲ ┌──────────────────────────────────────┐
┌───────────────────────┐ │ session_connection_closed_reason_enm │
│ session_connection │╲ fk2▶ ├──────────────────────────────────────┤
├───────────────────────┤─○───○┼│ name │
│ public_id (pk) │╱ └──────────────────────────────────────┘
│ session_id (fk1) │
│ closed_reason (fk2) │
└───────────────────────┘
▲fk1 ┼
╱│╲
┌──────────────────────────────────────────┐
│ session_connection_state │
├──────────────────────────────────────────┤┼○┐
│ connection_id (pk,fk1,fk2,unq1,unq2) │ │◀fk2
│ state (fk3) │ │
│ previous_end_time (fk2,unq1) │┼○┘
│ start_time (pk) │
│ end_time (unq2) │
└──────────────────────────────────────────┘
╲│╱
▼fk3 ┼
┌───────────────────────────────┐
│ session_connection_state_enm │
├───────────────────────────────┤
│ name │
└───────────────────────────────┘
*/
create table session_connection_closed_reason_enm (
name text primary key
constraint only_predefined_session_connection_closed_reasons_allowed
check (
name in (
'unknown',
'timed out',
'closed by end-user',
'canceled',
'network error',
'system error'
)
)
);
insert into session_connection_closed_reason_enm (name)
values
('unknown'),
('timed out'),
('closed by end-user'),
('canceled'),
('network error'),
('system error');
-- A session connection is one connection proxied by a worker from a client to
-- an endpoint for a session. The client initiates the connection to the worker
-- and the worker initiates the connection to the endpoint.
-- A session can have zero or more session connections.
-- Note: Updated to add server_id, server_type in 801
create table session_connection (
public_id wt_public_id primary key,
session_id wt_public_id not null
references session (public_id)
on delete cascade
on update cascade,
-- the client_tcp_address is the network address of the client which initiated
-- the connection to a worker
client_tcp_address inet, -- maybe null on insert
-- the client_tcp_port is the network port at the address of the client the
-- worker proxied a connection for the user
client_tcp_port integer -- maybe null on insert
constraint client_tcp_port_must_be_greater_than_0
check(client_tcp_port > 0)
constraint client_tcp_port_must_less_than_or_equal_to_65535
check(client_tcp_port <= 65535),
-- the endpoint_tcp_address is the network address of the endpoint which the
-- worker initiated the connection to, for the user
endpoint_tcp_address inet, -- maybe be null on insert
-- the endpoint_tcp_port is the network port at the address of the endpoint the
-- worker proxied a connection to, for the user
endpoint_tcp_port integer -- maybe null on insert
constraint endpoint_tcp_port_must_be_greater_than_0
check(endpoint_tcp_port > 0)
constraint endpoint_tcp_port_must_less_than_or_equal_to_65535
check(endpoint_tcp_port <= 65535),
-- the total number of bytes received by the worker from the client and sent
-- to the endpoint for this connection
bytes_up bigint -- can be null
constraint bytes_up_must_be_null_or_a_non_negative_number
check (
bytes_up is null
or
bytes_up >= 0
),
-- the total number of bytes received by the worker from the endpoint and sent
-- to the client for this connection
bytes_down bigint -- can be null
constraint bytes_down_must_be_null_or_a_non_negative_number
check (
bytes_down is null
or
bytes_down >= 0
),
closed_reason text
references session_connection_closed_reason_enm (name)
on delete restrict
on update cascade,
version wt_version,
create_time wt_timestamp,
update_time wt_timestamp
);
create trigger
immutable_columns
before
update on session_connection
for each row execute procedure immutable_columns('public_id', 'session_id', 'create_time');
create trigger
update_version_column
after update on session_connection
for each row execute procedure update_version_column();
create trigger
update_time_column
before update on session_connection
for each row execute procedure update_time_column();
create trigger
default_create_time_column
before
insert on session_connection
for each row execute procedure default_create_time();
-- insert_new_connection_state() is used in an after insert trigger on the
-- session_connection table. it will insert a state of "authorized" in
-- session_connection_state for the new session connection.
create or replace function
  insert_new_connection_state()
  returns trigger
as $$
begin
  insert into session_connection_state (connection_id, state)
  values
    (new.public_id, 'authorized');
  return new;
end;
$$ language plpgsql;
create trigger
  insert_new_connection_state
after insert on session_connection
  for each row execute procedure insert_new_connection_state();
-- update_connection_state_on_closed_reason() is used in an update trigger on the
-- session_connection table. it will insert a state of "closed" in
-- session_connection_state for the closed session connection.
create or replace function
  update_connection_state_on_closed_reason()
  returns trigger
as $$
begin
  if new.closed_reason is not null then
    -- check to see if there's a closed state already, before inserting a
    -- new one. (perform without a select list is a plpgsql existence check;
    -- it only sets "found".)
    perform from
      session_connection_state cs
    where
      cs.connection_id = new.public_id and
      cs.state = 'closed';
    if not found then
      insert into session_connection_state (connection_id, state)
      values
        (new.public_id, 'closed');
    end if;
    -- whenever we close a connection, we want to terminate the session if
    -- possible.
    perform terminate_session_if_possible(new.session_id);
  end if;
  return new;
end;
$$ language plpgsql;
-- fires only when closed_reason itself is updated, not on every update
create trigger
  update_connection_state_on_closed_reason
after update of closed_reason on session_connection
  for each row execute procedure update_connection_state_on_closed_reason();
-- session_connection_state_enm enumerates the states a session connection can
-- be in; the seed insert below must match the check constraint exactly.
create table session_connection_state_enm (
  name text primary key
    constraint only_predefined_session_connection_states_allowed
    check (
      name in ('authorized', 'connected', 'closed')
    )
);
insert into session_connection_state_enm (name)
values
  ('authorized'),
  ('connected'),
  ('closed');
-- session_connection_state records the history of states for a connection as
-- a chain of non-overlapping intervals: each row's previous_end_time must
-- equal a sibling row's end_time (self-referencing fk below), and at most one
-- row per connection may have a null end_time (the current state), enforced
-- by the unique (connection_id, end_time) constraint.
create table session_connection_state (
  connection_id wt_public_id not null
    references session_connection (public_id)
    on delete cascade
    on update cascade,
  state text not null
    references session_connection_state_enm(name)
    on delete restrict
    on update cascade,
  previous_end_time timestamp with time zone, -- fk2 -- null means first state
  start_time timestamp with time zone default current_timestamp not null,
    constraint previous_end_time_and_start_time_in_sequence
    check (previous_end_time <= start_time),
  end_time timestamp with time zone, -- null means unfinished current state
    constraint start_and_end_times_in_sequence
    check (start_time <= end_time),
    constraint end_times_in_sequence
    check (previous_end_time <> end_time),
  primary key (connection_id, start_time),
  unique (connection_id, previous_end_time), -- null means first state
  unique (connection_id, end_time), -- one null current state
  foreign key (connection_id, previous_end_time) -- self-reference
    references session_connection_state (connection_id, end_time)
);
-- state rows are append-only: everything but end_time is immutable
create trigger
  immutable_columns
before
update on session_connection_state
  for each row execute procedure immutable_columns('connection_id', 'state', 'start_time', 'previous_end_time');
-- insert_session_connection_state() is a before insert trigger function for
-- session_connection_state. It closes the connection's currently-open state
-- row (end_time is null) by setting its end_time to now(), then links the new
-- row into the chain: previous_end_time is null for the connection's first
-- state row, otherwise now() so it matches the just-closed row's end_time
-- (satisfying the table's self-referencing foreign key).
create or replace function
  insert_session_connection_state()
  returns trigger
as $$
begin
  update session_connection_state
     set end_time = now()
   where connection_id = new.connection_id
     and end_time is null;
  if not found then
    -- first state for this connection
    new.previous_end_time = null;
    new.start_time = now();
    new.end_time = null;
    return new;
  end if;
  -- subsequent state: chain to the row we just closed
  new.previous_end_time = now();
  new.start_time = now();
  new.end_time = null;
  return new;
end;
$$ language plpgsql;
create trigger insert_session_connection_state before insert on session_connection_state
  for each row execute procedure insert_session_connection_state();
-- terminate_session_if_possible takes a session id and terminates the session
-- if the following conditions are met:
-- * the session is expired and all its connections are closed.
-- * the session is canceling and all its connections are closed
-- * the session has exhausted its connection limit and all its connections
--   are closed.
--
-- Termination is expressed as a single set-based update that writes
-- session.termination_reason; the reason is chosen by the case expression
-- (timed out > canceled > connection limit, in that order of precedence).
--
-- Note: this function should align closely with the domain function
-- TerminateCompletedSessions
create or replace function
  terminate_session_if_possible(terminate_session_id text)
  returns void
as $$
begin
  -- is terminate_session_id in a canceling state
  with canceling_session(session_id) as
  (
    select
      session_id
    from
      session_state ss
    where
      ss.session_id = terminate_session_id and
      ss.state = 'canceling' and
      ss.end_time is null
  )
  update session us
    set termination_reason =
    case
      -- timed out sessions
      when now() > us.expiration_time then 'timed out'
      -- canceling sessions
      when us.public_id in(
        select
          session_id
        from
          canceling_session cs
        where
          us.public_id = cs.session_id
        ) then 'canceled'
      -- default: session connection limit reached.
      else 'connection limit'
    end
  where
    -- limit update to just the terminating_session_id
    us.public_id = terminate_session_id and
    -- only terminate once; a non-null reason means already terminated
    termination_reason is null and
    -- session expired or connection limit reached
    (
      -- expired sessions...
      now() > us.expiration_time or
      -- connection limit reached...
      (
        -- handle unlimited connections...
        connection_limit != -1 and
        (
          select count (*)
          from session_connection sc
          where
            sc.session_id = us.public_id
        ) >= connection_limit
      ) or
      -- canceled sessions
      us.public_id in (
        select
          session_id
        from
          canceling_session cs
        where
          us.public_id = cs.session_id
      )
    ) and
    -- make sure there are no existing connections
    -- (a connection is "existing" if its current state row, the one with a
    -- null end_time, is anything other than 'closed')
    us.public_id not in (
      select
        session_id
      from
        session_connection
      where public_id in (
        select
          connection_id
        from
          session_connection_state
        where
          state != 'closed' and
          end_time is null
      )
    );
end;
$$ language plpgsql;
`),
60: []byte(`
-- pgcrypto supplies digest() and gen_random_bytes(), used by wh_dim_id below.
create extension if not exists "pgcrypto";
create domain wh_inet_port as integer
check(
  value > 0
    and
  value <= 65535
);
comment on domain wh_inet_port is
'An ordinal number between 1 and 65535 representing a network port';
create domain wh_bytes_transmitted as bigint
check(
  value >= 0
);
comment on domain wh_bytes_transmitted is
'A non-negative integer representing the number of bytes transmitted';
-- wh_dim_id generates and returns a random ID which should be considered as
-- universally unique.
create or replace function wh_dim_id()
  returns text
as $$
  select encode(digest(gen_random_bytes(16), 'sha256'), 'base64');
$$ language sql;
create domain wh_dim_id as text
check(
  length(trim(value)) > 0
);
comment on domain wh_dim_id is
'Random ID generated with pgcrypto';
create domain wh_public_id as text
check(
  value = 'None'
    or
  length(trim(value)) > 10
);
comment on domain wh_public_id is
'Equivalent to wt_public_id but also allows the value to be ''None''';
create domain wh_timestamp as timestamp with time zone not null;
comment on domain wh_timestamp is
'Timestamp used in warehouse tables';
create domain wh_dim_text as text not null
check(
  length(trim(value)) > 0
);
comment on domain wh_dim_text is
'Text fields in dimension tables are always not null and always not empty strings';
-- wh_date_id returns the wh_date_dimension id for ts.
-- The id is the date encoded as an integer, e.g. 2019-10-09 -> 20191009.
create or replace function wh_date_id(ts wh_timestamp)
  returns integer
as $$
  select to_char(ts, 'YYYYMMDD')::integer;
$$ language sql;
-- wh_time_id returns the wh_time_of_day_dimension id for ts.
-- 'SSSS' is seconds past midnight, so the id is in [0, 86399].
create or replace function wh_time_id(ts wh_timestamp)
  returns integer
as $$
  select to_char(ts, 'SSSS')::integer;
$$ language sql;
-- wh_date_id returns the wh_date_dimension id for current_timestamp.
create or replace function wh_current_date_id()
  returns integer
as $$
  select wh_date_id(current_timestamp);
$$ language sql;
-- wh_time_id returns the wh_time_of_day_dimension id for current_timestamp.
create or replace function wh_current_time_id()
  returns integer
as $$
  select wh_time_id(current_timestamp);
$$ language sql;
`),
62: []byte(`
-- wh_date_dimension is a standard warehouse date dimension, pre-populated
-- below with 50 years of days. The id is the date as YYYYMMDD (see
-- wh_date_id); the id -1 row is the sentinel for "no date yet" and pairs
-- with the 'infinity' timestamp defaults used by the fact tables.
create table wh_date_dimension (
  id integer primary key,
  date date not null,
  calendar_quarter wh_dim_text,
  calendar_month wh_dim_text,
  calendar_year smallint not null,
  day_of_week wh_dim_text,
  day_of_week_number smallint not null,
  day_of_week_number_iso smallint not null,
  day_of_week_number_zero_based smallint not null,
  day_number_in_calendar_month smallint not null,
  day_number_in_calendar_year smallint not null,
  weekday_indicator wh_dim_text
);
-- sentinel row: referenced by fact-table date columns that default to -1
insert into wh_date_dimension (
  id, date,
  calendar_quarter, calendar_month, calendar_year,
  day_of_week, day_of_week_number, day_of_week_number_iso, day_of_week_number_zero_based,
  day_number_in_calendar_month, day_number_in_calendar_year,
  weekday_indicator
) values (
  -1, 'infinity',
  'None', 'None', -1,
  'None', -1, -1, -1,
  -1, -1,
  'None'
);
-- populate one row per day for 50 years starting 2019-10-09
insert
  into wh_date_dimension
select to_char(t.day, 'YYYYMMDD')::integer as id,
       t.day::date as date,
       'Q' || to_char(t.day, 'Q') as calendar_quarter,
       to_char(t.day, 'Month') as calendar_month,
       extract(year from t.day) as calendar_year,
       to_char(t.day, 'Day') as day_of_week,
       to_char(t.day, 'D')::int as day_of_week_number,
       extract(isodow from t.day) as day_of_week_number_iso,
       extract(dow from t.day) as day_of_week_number_zero_based,
       extract(day from t.day) as day_number_in_calendar_month,
       extract(doy from t.day) as day_number_in_calendar_year,
       case extract(isodow from t.day)
         when 6 then 'Weekend'
         when 7 then 'Weekend'
         else 'Weekday'
       end as weekday_indicator
  from generate_series(
         date_trunc('day', timestamp '2019-10-09'),
         date_trunc('day', timestamp '2019-10-09' + interval '50 years'),
         interval '1 day'
       ) as t(day);
-- wh_time_of_day_dimension has one row per second of the day; the id is
-- seconds past midnight (see wh_time_id). The id -1 row is the sentinel.
create table wh_time_of_day_dimension (
  id integer primary key,
  time_no_zone time not null,
  time_at_utc timetz not null,
  hour_of_day smallint not null,
  minute_of_hour smallint not null,
  second_of_minute smallint not null,
  display_time_24 wh_dim_text,
  display_time_12 wh_dim_text,
  meridiem_indicator wh_dim_text
);
-- pin the session timezone so the generate_series below produces UTC times
set timezone = 'UTC';
-- sentinel row ('allballs' is the postgres input for 00:00:00 UTC)
insert into wh_time_of_day_dimension (
  id, time_no_zone, time_at_utc,
  hour_of_day, minute_of_hour, second_of_minute,
  display_time_24, display_time_12, meridiem_indicator
) values (
  -1, 'allballs', 'allballs',
  -1, -1, -1,
  'None', 'None', 'None'
);
-- populate one row per second for a 24 hour day
insert
  into wh_time_of_day_dimension
select to_char(t.second, 'SSSS')::integer as id,
       t.second::time as time_no_zone,
       t.second::time as time_at_utc,
       extract(hour from t.second) as hour_of_day,
       extract(minute from t.second) as minute_of_hour,
       extract(second from t.second) as second_of_minute,
       to_char(t.second, 'HH24:MI:SS') as display_time_24,
       to_char(t.second, 'HH12:MI:SS AM') as display_time_12,
       to_char(t.second, 'PM') as meridiem_indicator
  from generate_series(
         date_trunc('day', current_timestamp),
         date_trunc('day', current_timestamp) + interval '24 hours' - interval '1 second',
         interval '1 second'
       ) as t(second);
`),
65: []byte(`
-- wh_host_dimension is a type-2 slowly changing dimension: instead of
-- updating a row in place, the current row is marked 'Expired' and a new
-- 'Current' row is inserted (see wh_upsert_host in a later migration).
create table wh_host_dimension (
  -- random id generated using encode(digest(gen_random_bytes(16), 'sha256'), 'base64')
  -- this is done to prevent conflicts with rows in other clusters
  -- which enables warehouse data from multiple clusters to be loaded into a
  -- single database instance
  id wh_dim_id primary key default wh_dim_id(),
  host_id wh_public_id not null,
  host_type wh_dim_text,
  host_name wh_dim_text,
  host_description wh_dim_text,
  host_address wh_dim_text,
  host_set_id wh_public_id not null,
  host_set_type wh_dim_text,
  host_set_name wh_dim_text,
  host_set_description wh_dim_text,
  host_catalog_id wh_public_id not null,
  host_catalog_type wh_dim_text,
  host_catalog_name wh_dim_text,
  host_catalog_description wh_dim_text,
  target_id wh_public_id not null,
  target_type wh_dim_text,
  target_name wh_dim_text,
  target_description wh_dim_text,
  target_default_port_number integer not null,
  target_session_max_seconds integer not null,
  target_session_connection_limit integer not null,
  project_id wt_scope_id not null,
  project_name wh_dim_text,
  project_description wh_dim_text,
  host_organization_id wt_scope_id not null,
  host_organization_name wh_dim_text,
  host_organization_description wh_dim_text,
  -- type-2 SCD bookkeeping: 'Current' or 'Expired' plus effective interval
  current_row_indicator wh_dim_text,
  row_effective_time wh_timestamp,
  row_expiration_time wh_timestamp
);
-- partial unique index: at most one 'Current' row per (target, set, host)
-- https://www.postgresql.org/docs/current/indexes-partial.html
create unique index wh_host_dim_current_constraint
  on wh_host_dimension (target_id, host_set_id, host_id)
  where current_row_indicator = 'Current';
-- The whx_host_dimension_source and whx_host_dimension_target views are used
-- by an insert trigger to determine if the current row for the dimension has
-- changed and new one needs to be inserted. The first column in the target
-- view must be the current warehouse id and all remaining columns must match
-- the columns in the source view.
-- The whx_host_dimension_source view shows the current values in the
-- operational tables of the host dimension.
-- NOTE(review): this view uses comma joins with conditions in the where
-- clause; ANSI join syntax would be clearer, but the shipped migration is
-- left unchanged.
create view whx_host_dimension_source as
  select -- id is the first column in the target view
         h.public_id as host_id,
         'static host' as host_type,
         coalesce(h.name, 'None') as host_name,
         coalesce(h.description, 'None') as host_description,
         coalesce(h.address, 'Unknown') as host_address,
         s.public_id as host_set_id,
         'static host set' as host_set_type,
         coalesce(s.name, 'None') as host_set_name,
         coalesce(s.description, 'None') as host_set_description,
         c.public_id as host_catalog_id,
         'static host catalog' as host_catalog_type,
         coalesce(c.name, 'None') as host_catalog_name,
         coalesce(c.description, 'None') as host_catalog_description,
         t.public_id as target_id,
         'tcp target' as target_type,
         coalesce(t.name, 'None') as target_name,
         coalesce(t.description, 'None') as target_description,
         coalesce(t.default_port, 0) as target_default_port_number,
         t.session_max_seconds as target_session_max_seconds,
         t.session_connection_limit as target_session_connection_limit,
         p.public_id as project_id,
         coalesce(p.name, 'None') as project_name,
         coalesce(p.description, 'None') as project_description,
         o.public_id as host_organization_id,
         coalesce(o.name, 'None') as host_organization_name,
         coalesce(o.description, 'None') as host_organization_description
    from static_host as h,
         static_host_catalog as c,
         static_host_set_member as m,
         static_host_set as s,
         target_host_set as ts,
         target_tcp as t,
         iam_scope as p,
         iam_scope as o
   where h.catalog_id = c.public_id
     and h.public_id = m.host_id
     and s.public_id = m.set_id
     and t.public_id = ts.target_id
     and s.public_id = ts.host_set_id
     and p.public_id = t.scope_id
     and p.type = 'project'
     and o.public_id = p.parent_id
     and o.type = 'org'
;
-- The whx_host_dimension_target view shows the rows in the wh_host_dimension
-- table marked as 'Current'.
create view whx_host_dimension_target as
  select id,
         host_id,
         host_type,
         host_name,
         host_description,
         host_address,
         host_set_id,
         host_set_type,
         host_set_name,
         host_set_description,
         host_catalog_id,
         host_catalog_type,
         host_catalog_name,
         host_catalog_description,
         target_id,
         target_type,
         target_name,
         target_description,
         target_default_port_number,
         target_session_max_seconds,
         target_session_connection_limit,
         project_id,
         project_name,
         project_description,
         host_organization_id,
         host_organization_name,
         host_organization_description
    from wh_host_dimension
   where current_row_indicator = 'Current'
;
-- wh_user_dimension is a type-2 slowly changing dimension for users and the
-- auth account/method they authenticated with (see wh_upsert_user).
create table wh_user_dimension (
  -- random id generated using encode(digest(gen_random_bytes(16), 'sha256'), 'base64')
  -- this is done to prevent conflicts with rows in other clusters
  -- which enables warehouse data from multiple clusters to be loaded into a
  -- single database instance
  id wh_dim_id primary key default wh_dim_id(),
  user_id wt_user_id not null,
  user_name wh_dim_text,
  user_description wh_dim_text,
  auth_account_id wh_public_id not null,
  auth_account_type wh_dim_text,
  auth_account_name wh_dim_text,
  auth_account_description wh_dim_text,
  auth_method_id wh_public_id not null,
  auth_method_type wh_dim_text,
  auth_method_name wh_dim_text,
  auth_method_description wh_dim_text,
  user_organization_id wt_scope_id not null,
  user_organization_name wh_dim_text,
  user_organization_description wh_dim_text,
  -- type-2 SCD bookkeeping: 'Current' or 'Expired' plus effective interval
  current_row_indicator wh_dim_text,
  row_effective_time wh_timestamp,
  row_expiration_time wh_timestamp
);
-- The whx_user_dimension_source and whx_user_dimension_target views are used
-- by an insert trigger to determine if the current row for the dimension has
-- changed and new one needs to be inserted. The first column in the target
-- view must be the current warehouse id and all remaining columns must match
-- the columns in the source view.
-- The whx_user_dimension_source view shows the current values in the
-- operational tables of the user dimension.
-- Left joins keep users without auth accounts/methods, reported as 'None'.
create view whx_user_dimension_source as
  select -- id is the first column in the target view
         u.public_id as user_id,
         coalesce(u.name, 'None') as user_name,
         coalesce(u.description, 'None') as user_description,
         coalesce(aa.public_id, 'None') as auth_account_id,
         case when aa.public_id is null then 'None'
              else 'password auth account'
         end as auth_account_type,
         coalesce(apa.name, 'None') as auth_account_name,
         coalesce(apa.description, 'None') as auth_account_description,
         coalesce(am.public_id, 'None') as auth_method_id,
         case when am.public_id is null then 'None'
              else 'password auth method'
         end as auth_method_type,
         coalesce(apm.name, 'None') as auth_method_name,
         coalesce(apm.description, 'None') as auth_method_description,
         org.public_id as user_organization_id,
         coalesce(org.name, 'None') as user_organization_name,
         coalesce(org.description, 'None') as user_organization_description
    from iam_user as u
    left join auth_account as aa on u.public_id = aa.iam_user_id
    left join auth_method as am on aa.auth_method_id = am.public_id
    left join auth_password_account as apa on aa.public_id = apa.public_id
    left join auth_password_method as apm on am.public_id = apm.public_id
    join iam_scope as org on u.scope_id = org.public_id
;
-- The whx_user_dimension_target view shows the rows in the wh_user_dimension
-- table marked as 'Current'.
create view whx_user_dimension_target as
  select id,
         user_id,
         user_name,
         user_description,
         auth_account_id,
         auth_account_type,
         auth_account_name,
         auth_account_description,
         auth_method_id,
         auth_method_type,
         auth_method_name,
         auth_method_description,
         user_organization_id,
         user_organization_name,
         user_organization_description
    from wh_user_dimension
   where current_row_indicator = 'Current'
;
`),
66: []byte(`
-- wh_upsert_host returns the wh_host_dimension id for p_host_id,
-- p_host_set_id, and p_target_id. wh_upsert_host compares the current values
-- in the wh_host_dimension with the current values in the operational tables
-- for the provide parameters. If the values between the operational tables
-- and the wh_host_dimension differ, a new row is inserted in the
-- wh_host_dimension to match the current values in the operational tables and
-- the new id is returned. If the values do not differ, the current id is
-- returned.
--
-- This is the standard type-2 SCD maintenance step for wh_host_dimension:
-- expire-then-insert rather than update in place.
create or replace function wh_upsert_host(p_host_id wt_public_id, p_host_set_id wt_public_id, p_target_id wt_public_id)
  returns wh_dim_id
as $$
declare
  src whx_host_dimension_target%rowtype;
  target whx_host_dimension_target%rowtype;
  new_row wh_host_dimension%rowtype;
begin
  select * into target
    from whx_host_dimension_target as t
   where t.host_id = p_host_id
     and t.host_set_id = p_host_set_id
     and t.target_id = p_target_id;
  -- prefix the current warehouse id onto the source row so src and target
  -- have identical shapes for the "is distinct from" comparison below
  select target.id, t.* into src
    from whx_host_dimension_source as t
   where t.host_id = p_host_id
     and t.host_set_id = p_host_set_id
     and t.target_id = p_target_id;
  -- "is distinct from" treats nulls as comparable, so a missing target row
  -- (all nulls) also triggers the insert path
  if src is distinct from target then
    -- expire the current row
    update wh_host_dimension
       set current_row_indicator = 'Expired',
           row_expiration_time = current_timestamp
     where host_id = p_host_id
       and host_set_id = p_host_set_id
       and target_id = p_target_id
       and current_row_indicator = 'Current';
    -- insert a new row
    insert into wh_host_dimension (
           host_id, host_type, host_name, host_description, host_address,
           host_set_id, host_set_type, host_set_name, host_set_description,
           host_catalog_id, host_catalog_type, host_catalog_name, host_catalog_description,
           target_id, target_type, target_name, target_description,
           target_default_port_number, target_session_max_seconds, target_session_connection_limit,
           project_id, project_name, project_description,
           host_organization_id, host_organization_name, host_organization_description,
           current_row_indicator, row_effective_time, row_expiration_time
    )
    select host_id, host_type, host_name, host_description, host_address,
           host_set_id, host_set_type, host_set_name, host_set_description,
           host_catalog_id, host_catalog_type, host_catalog_name, host_catalog_description,
           target_id, target_type, target_name, target_description,
           target_default_port_number, target_session_max_seconds, target_session_connection_limit,
           project_id, project_name, project_description,
           host_organization_id, host_organization_name, host_organization_description,
           'Current', current_timestamp, 'infinity'::timestamptz
      from whx_host_dimension_source
     where host_id = p_host_id
       and host_set_id = p_host_set_id
       and target_id = p_target_id
    returning * into new_row;
    return new_row.id;
  end if;
  return target.id;
end;
$$ language plpgsql;
-- wh_upsert_user returns the wh_user_dimension id for p_user_id and
-- p_auth_token_id. wh_upsert_user compares the current values in the
-- wh_user_dimension with the current values in the operational tables for the
-- provide parameters. If the values between the operational tables and the
-- wh_user_dimension differ, a new row is inserted in the wh_user_dimension to
-- match the current values in the operational tables and the new id is
-- returned. If the values do not differ, the current id is returned.
--
-- Same expire-then-insert type-2 SCD pattern as wh_upsert_host; the auth
-- account is resolved from the auth token first.
create or replace function wh_upsert_user(p_user_id wt_user_id, p_auth_token_id wt_public_id)
  returns wh_dim_id
as $$
declare
  src whx_user_dimension_target%rowtype;
  target whx_user_dimension_target%rowtype;
  new_row wh_user_dimension%rowtype;
  acct_id wt_public_id;
begin
  -- "into strict" raises if the auth token is missing or not unique
  select auth_account_id into strict acct_id
    from auth_token
   where public_id = p_auth_token_id;
  select * into target
    from whx_user_dimension_target as t
   where t.user_id = p_user_id
     and t.auth_account_id = acct_id;
  -- prefix the current warehouse id so src and target shapes match
  select target.id, t.* into src
    from whx_user_dimension_source as t
   where t.user_id = p_user_id
     and t.auth_account_id = acct_id;
  if src is distinct from target then
    -- expire the current row
    update wh_user_dimension
       set current_row_indicator = 'Expired',
           row_expiration_time = current_timestamp
     where user_id = p_user_id
       and auth_account_id = acct_id
       and current_row_indicator = 'Current';
    -- insert a new row
    insert into wh_user_dimension (
           user_id, user_name, user_description,
           auth_account_id, auth_account_type, auth_account_name, auth_account_description,
           auth_method_id, auth_method_type, auth_method_name, auth_method_description,
           user_organization_id, user_organization_name, user_organization_description,
           current_row_indicator, row_effective_time, row_expiration_time
    )
    select user_id, user_name, user_description,
           auth_account_id, auth_account_type, auth_account_name, auth_account_description,
           auth_method_id, auth_method_type, auth_method_name, auth_method_description,
           user_organization_id, user_organization_name, user_organization_description,
           'Current', current_timestamp, 'infinity'::timestamptz
      from whx_user_dimension_source
     where user_id = p_user_id
       and auth_account_id = acct_id
    returning * into new_row;
    return new_row.id;
  end if;
  return target.id;
end;
$$ language plpgsql;
`),
68: []byte(`
-- Column names for numeric fields that are not a measurement end in id or
-- number. This naming convention enables automatic field type detection in
-- certain data analysis tools.
-- https://help.tableau.com/current/pro/desktop/en-us/data_clean_adm.htm
-- The wh_session_accumulating_fact table is an accumulating snapshot.
-- The table wh_session_accumulating_fact is an accumulating fact table.
-- The grain of the fact table is one row per session.
-- Milestone columns (pending/active/canceling/terminated) start at the -1
-- date/time sentinel rows and 'infinity' timestamps and are filled in as the
-- session progresses through its lifecycle.
create table wh_session_accumulating_fact (
  session_id wt_public_id primary key,
  -- auth token id is a degenerate dimension
  auth_token_id wt_public_id not null,
  -- foreign keys to the dimension tables
  host_id wh_dim_id not null
    references wh_host_dimension (id)
    on delete restrict
    on update cascade,
  user_id wh_dim_id not null
    references wh_user_dimension (id)
    on delete restrict
    on update cascade,
  -- TODO(mgaffney) 09/2020: add dimension and foreign key for the session
  -- termination reason
  -- date and time foreign keys
  session_pending_date_id integer not null
    references wh_date_dimension (id)
    on delete restrict
    on update cascade,
  session_pending_time_id integer not null
    references wh_time_of_day_dimension (id)
    on delete restrict
    on update cascade,
  session_pending_time wh_timestamp,
  session_active_date_id integer default -1 not null
    references wh_date_dimension (id)
    on delete restrict
    on update cascade,
  session_active_time_id integer default -1 not null
    references wh_time_of_day_dimension (id)
    on delete restrict
    on update cascade,
  session_active_time wh_timestamp default 'infinity'::timestamptz,
  session_canceling_date_id integer default -1 not null
    references wh_date_dimension (id)
    on delete restrict
    on update cascade,
  session_canceling_time_id integer default -1 not null
    references wh_time_of_day_dimension (id)
    on delete restrict
    on update cascade,
  session_canceling_time wh_timestamp default 'infinity'::timestamptz,
  session_terminated_date_id integer default -1 not null
    references wh_date_dimension (id)
    on delete restrict
    on update cascade,
  session_terminated_time_id integer default -1 not null
    references wh_time_of_day_dimension (id)
    on delete restrict
    on update cascade,
  session_terminated_time wh_timestamp default 'infinity'::timestamptz,
  -- TODO(mgaffney) 09/2020: add columns for session expiration
  -- TODO(mgaffney) 09/2020: add connection limit. This may need a dimension
  -- table and foreign key column to represent unlimited connections.
  -- The total number of connections made during the session.
  total_connection_count bigint, -- will be null until the first connection is created
  -- The total number of bytes received by workers from the client and sent
  -- to the endpoint for this session.
  -- total_bytes_up is a fully additive measurement.
  total_bytes_up wh_bytes_transmitted, -- will be null until the first connection is closed
  -- The total number of bytes received by workers from the endpoint and sent
  -- to the client for this session.
  -- total_bytes_down is a fully additive measurement.
  total_bytes_down wh_bytes_transmitted -- will be null until the first connection is closed
);
-- The wh_session_connection_accumulating_fact table is an accumulating fact table.
-- The grain of the fact table is one row per session connection.
-- Milestone columns (authorized/connected/closed) start at the -1 date/time
-- sentinel rows and 'infinity' timestamps and are filled in as the
-- connection progresses through its lifecycle.
create table wh_session_connection_accumulating_fact (
  connection_id wt_public_id primary key,
  -- session_id is a degenerate dimension
  session_id wt_public_id not null
    references wh_session_accumulating_fact (session_id)
    on delete cascade
    on update cascade,
  -- foreign keys to the dimension tables
  host_id wh_dim_id not null
    references wh_host_dimension (id)
    on delete restrict
    on update cascade,
  user_id wh_dim_id not null
    references wh_user_dimension (id)
    on delete restrict
    on update cascade,
  -- TODO(mgaffney) 09/2020: add dimension and foreign key for the connection
  -- closed reason
  -- date and time foreign keys and timestamps
  connection_authorized_date_id integer not null
    references wh_date_dimension (id)
    on delete restrict
    on update cascade,
  connection_authorized_time_id integer not null
    references wh_time_of_day_dimension (id)
    on delete restrict
    on update cascade,
  connection_authorized_time wh_timestamp,
  connection_connected_date_id integer default -1 not null
    references wh_date_dimension (id)
    on delete restrict
    on update cascade,
  connection_connected_time_id integer default -1 not null
    references wh_time_of_day_dimension (id)
    on delete restrict
    on update cascade,
  connection_connected_time wh_timestamp default 'infinity'::timestamptz,
  connection_closed_date_id integer default -1 not null
    references wh_date_dimension (id)
    on delete restrict
    on update cascade,
  connection_closed_time_id integer default -1 not null
    references wh_time_of_day_dimension (id)
    on delete restrict
    on update cascade,
  connection_closed_time wh_timestamp default 'infinity'::timestamptz,
  -- TODO(mgaffney) 09/2020: add a connection_duration_in_seconds column
  -- The client address and port are degenerate dimensions
  client_tcp_address inet, -- can be null
  client_tcp_port_number wh_inet_port, -- can be null
  -- The endpoint address and port are degenerate dimensions
  endpoint_tcp_address inet, -- can be null
  endpoint_tcp_port_number wh_inet_port, -- can be null
  -- the connection_count must always be 1
  -- this is a common pattern in data warehouse models
  -- See The Data Warehouse Toolkit, Third Edition
  -- by Ralph Kimball and Margy Ross for more information
  connection_count smallint default 1 not null
    constraint connection_count_must_be_1
    check(connection_count = 1),
  -- The total number of bytes received by the worker from the client and sent
  -- to the endpoint for this connection.
  -- bytes_up is a fully additive measurement.
  bytes_up wh_bytes_transmitted, -- can be null
  -- The total number of bytes received by the worker from the endpoint and sent
  -- to the client for this connection.
  -- bytes_down is a fully additive measurement.
  bytes_down wh_bytes_transmitted -- can be null
);
-- TODO(mgaffney) 09/2020: Research and test if the comment fields are used by
-- data analysis tools.
-- Note: adjacent string constants separated by a newline are concatenated by
-- postgres, so each comment below is a single string.
comment on table wh_session_connection_accumulating_fact is
'The Wh Session Connection Accumulating Fact table is an accumulating fact table. '
'The grain of the fact table is one row per session connection.';
comment on column wh_session_connection_accumulating_fact.bytes_up is
'Bytes Up is the total number of bytes received by the worker from the '
'client and sent to the endpoint for this connection. Bytes Up is a fully '
'additive measurement.';
comment on column wh_session_connection_accumulating_fact.bytes_down is
'Bytes Down is the total number of bytes received by the worker from the '
'endpoint and sent to the client for this connection. Bytes Down is a fully '
'additive measurement.';
-- supports the per-session rollup in wh_rollup_connections
create index on wh_session_connection_accumulating_fact(session_id);
`),
69: []byte(`
-- wh_rollup_connections calculates the aggregate values from
-- wh_session_connection_accumulating_fact for p_session_id and updates
-- wh_session_accumulating_fact for p_session_id with those values.
--
-- The "returning ... into strict" clause makes the update raise if it did
-- not touch exactly one session fact row.
create or replace function wh_rollup_connections(p_session_id wt_public_id)
  returns void
as $$
declare
  session_row wh_session_accumulating_fact%rowtype;
begin
  with
  session_totals (session_id, total_connection_count, total_bytes_up, total_bytes_down) as (
    select session_id,
           sum(connection_count),
           sum(bytes_up),
           sum(bytes_down)
      from wh_session_connection_accumulating_fact
     where session_id = p_session_id
  group by session_id
  )
  update wh_session_accumulating_fact
     set total_connection_count = session_totals.total_connection_count,
         total_bytes_up = session_totals.total_bytes_up,
         total_bytes_down = session_totals.total_bytes_down
    from session_totals
   where wh_session_accumulating_fact.session_id = session_totals.session_id
  returning wh_session_accumulating_fact.* into strict session_row;
end;
$$ language plpgsql;
--
-- Session triggers
--
-- wh_insert_session returns an after insert trigger for the session table
-- which inserts a row in wh_session_accumulating_fact for the new session.
-- wh_insert_session also calls the wh_upsert_host and wh_upsert_user
-- functions which can result in new rows in wh_host_dimension and
-- wh_user_dimension respectively.
create or replace function wh_insert_session()
returns trigger
as $$
declare
new_row wh_session_accumulating_fact%rowtype;
begin
with
-- the session's 'pending' state row provides the date/time dimension keys
-- and the timestamp for the start of the accumulating fact
pending_timestamp (date_dim_id, time_dim_id, ts) as (
select wh_date_id(start_time), wh_time_id(start_time), start_time
from session_state
where session_id = new.public_id
and state = 'pending'
)
insert into wh_session_accumulating_fact (
session_id,
auth_token_id,
host_id,
user_id,
session_pending_date_id,
session_pending_time_id,
session_pending_time
)
select new.public_id,
new.auth_token_id,
wh_upsert_host(new.host_id, new.host_set_id, new.target_id),
wh_upsert_user(new.user_id, new.auth_token_id),
pending_timestamp.date_dim_id,
pending_timestamp.time_dim_id,
pending_timestamp.ts
from pending_timestamp
-- strict: fails if the pending state row is missing (zero rows inserted)
returning * into strict new_row;
-- after triggers ignore the return value
return null;
end;
$$ language plpgsql;
create trigger wh_insert_session
after insert on session
for each row
execute function wh_insert_session();
--
-- Session Connection triggers
--
-- wh_insert_session_connection returns an after insert trigger for the
-- session_connection table which inserts a row in
-- wh_session_connection_accumulating_fact for the new session connection.
-- wh_insert_session_connection also calls wh_rollup_connections which can
-- result in updates to wh_session_accumulating_fact.
create or replace function wh_insert_session_connection()
returns trigger
as $$
declare
new_row wh_session_connection_accumulating_fact%rowtype;
begin
with
-- the connection's 'authorized' state row provides the date/time dimension
-- keys and timestamp for the start of the accumulating fact
authorized_timestamp (date_dim_id, time_dim_id, ts) as (
select wh_date_id(start_time), wh_time_id(start_time), start_time
from session_connection_state
where connection_id = new.public_id
and state = 'authorized'
),
-- reuse the host/user dimension keys already resolved for the parent session
session_dimension (host_dim_id, user_dim_id) as (
select host_id, user_id
from wh_session_accumulating_fact
where session_id = new.session_id
)
insert into wh_session_connection_accumulating_fact (
connection_id,
session_id,
host_id,
user_id,
connection_authorized_date_id,
connection_authorized_time_id,
connection_authorized_time,
client_tcp_address,
client_tcp_port_number,
endpoint_tcp_address,
endpoint_tcp_port_number,
bytes_up,
bytes_down
)
select new.public_id,
new.session_id,
session_dimension.host_dim_id,
session_dimension.user_dim_id,
authorized_timestamp.date_dim_id,
authorized_timestamp.time_dim_id,
authorized_timestamp.ts,
new.client_tcp_address,
new.client_tcp_port,
new.endpoint_tcp_address,
new.endpoint_tcp_port,
new.bytes_up,
new.bytes_down
-- cross join of two single-row CTEs; if either is empty, zero rows are
-- inserted and the strict clause below raises
from authorized_timestamp,
session_dimension
returning * into strict new_row;
perform wh_rollup_connections(new.session_id);
return null;
end;
$$ language plpgsql;
create trigger wh_insert_session_connection
after insert on session_connection
for each row
execute function wh_insert_session_connection();
-- wh_update_session_connection returns an after update trigger for the
-- session_connection table which updates a row in
-- wh_session_connection_accumulating_fact for the session connection.
-- wh_update_session_connection also calls wh_rollup_connections which can
-- result in updates to wh_session_accumulating_fact.
create or replace function wh_update_session_connection()
returns trigger
as $$
declare
updated_row wh_session_connection_accumulating_fact%rowtype;
begin
update wh_session_connection_accumulating_fact
set client_tcp_address = new.client_tcp_address,
client_tcp_port_number = new.client_tcp_port,
endpoint_tcp_address = new.endpoint_tcp_address,
endpoint_tcp_port_number = new.endpoint_tcp_port,
bytes_up = new.bytes_up,
bytes_down = new.bytes_down
where connection_id = new.public_id
-- strict: the fact row must already exist (created by the insert trigger)
returning * into strict updated_row;
-- re-aggregate the parent session's totals after the connection changed
perform wh_rollup_connections(new.session_id);
return null;
end;
$$ language plpgsql;
create trigger wh_update_session_connection
after update on session_connection
for each row
execute function wh_update_session_connection();
--
-- Session State trigger
--
-- wh_insert_session_state returns an after insert trigger for the
-- session_state table which updates wh_session_accumulating_fact.
create or replace function wh_insert_session_state()
returns trigger
as $$
declare
date_col text;
time_col text;
ts_col text;
q text;
session_row wh_session_accumulating_fact%rowtype;
begin
if new.state = 'pending' then
-- The pending state is the first state which is handled by the
-- wh_insert_session trigger. The update statement in this trigger will
-- fail for the pending state because the row for the session has not yet
-- been inserted into the wh_session_accumulating_fact table.
return null;
end if;
-- the fact table has one (date_id, time_id, time) column triple per state;
-- build the target column names from the state value
date_col = 'session_' || new.state || '_date_id';
time_col = 'session_' || new.state || '_time_id';
ts_col = 'session_' || new.state || '_time';
-- %I quotes identifiers and %L quotes literals, guarding the dynamic SQL
-- against injection and quoting mistakes
q = format('update wh_session_accumulating_fact
set (%I, %I, %I) = (select wh_date_id(%L), wh_time_id(%L), %L::timestamptz)
where session_id = %L
returning *',
date_col, time_col, ts_col,
new.start_time, new.start_time, new.start_time,
new.session_id);
-- strict: exactly one session fact row must be updated
execute q into strict session_row;
return null;
end;
$$ language plpgsql;
create trigger wh_insert_session_state
after insert on session_state
for each row
execute function wh_insert_session_state();
--
-- Session Connection State trigger
--
-- wh_insert_session_connection_state returns an after insert trigger for the
-- session_connection_state table which updates
-- wh_session_connection_accumulating_fact.
create or replace function wh_insert_session_connection_state()
returns trigger
as $$
declare
date_col text;
time_col text;
ts_col text;
q text;
connection_row wh_session_connection_accumulating_fact%rowtype;
begin
if new.state = 'authorized' then
-- The authorized state is the first state which is handled by the
-- wh_insert_session_connection trigger. The update statement in this
-- trigger will fail for the authorized state because the row for the
-- session connection has not yet been inserted into the
-- wh_session_connection_accumulating_fact table.
return null;
end if;
-- mirror of wh_insert_session_state: pick the per-state column triple
date_col = 'connection_' || new.state || '_date_id';
time_col = 'connection_' || new.state || '_time_id';
ts_col = 'connection_' || new.state || '_time';
-- %I/%L formatting protects the dynamically built statement
q = format('update wh_session_connection_accumulating_fact
set (%I, %I, %I) = (select wh_date_id(%L), wh_time_id(%L), %L::timestamptz)
where connection_id = %L
returning *',
date_col, time_col, ts_col,
new.start_time, new.start_time, new.start_time,
new.connection_id);
-- strict: exactly one connection fact row must be updated
execute q into strict connection_row;
return null;
end;
$$ language plpgsql;
create trigger wh_insert_session_connection_state
after insert on session_connection_state
for each row
execute function wh_insert_session_connection_state();
`),
1001: []byte(`
-- This series of expressions fixes the primary key on the server table
-- PG 12+
alter table session
drop constraint if exists session_server_id_server_type_fkey;
-- PG 11
alter table session
drop constraint if exists session_server_id_fkey;
-- NOTE: PG 11 and PG 12+ auto-generate different constraint names, hence the
-- two "drop constraint if exists" statements above.
alter table server
drop constraint server_pkey;
alter table server
drop column name;
alter table server
add primary key (private_id);
alter table server
add constraint server_id_must_not_be_empty
check(length(trim(private_id)) > 0);
-- recreate the session -> server fkey against the new primary key
alter table session
add constraint session_server_id_fkey
foreign key (server_id)
references server(private_id)
on delete set null
on update cascade;
-- wt_bexprfilter allows null; non-null values must be 1..2048 chars after trim
create domain wt_bexprfilter as text
check(
value is null
or
(
length(trim(value)) > 0
and
length(trim(value)) <= 2048
)
);
comment on domain wt_bexprfilter is
'Text field with constraints for go-bexpr filters';
-- Add the worker filter to the target_tcp table and session table
alter table target_tcp
add column worker_filter wt_bexprfilter;
alter table session
add column worker_filter wt_bexprfilter;
-- Replace the immutable columns trigger from 50 to add worker_filter
drop trigger immutable_columns on session;
create trigger immutable_columns
before update on session
for each row execute procedure immutable_columns('public_id', 'certificate', 'expiration_time', 'connection_limit', 'create_time', 'endpoint', 'worker_filter');
-- Replaces the view created in 41 to include worker_filter
-- (views cannot add columns via "create or replace", so drop and recreate)
drop view target_all_subtypes;
create view target_all_subtypes
as
select
public_id,
scope_id,
name,
description,
default_port,
session_max_seconds,
session_connection_limit,
version,
create_time,
update_time,
worker_filter,
'tcp' as type
from target_tcp;
-- Replaces the view created in 50 to include worker_filter
drop view session_with_state;
create view session_with_state as
select
s.public_id,
s.user_id,
s.host_id,
s.server_id,
s.server_type,
s.target_id,
s.host_set_id,
s.auth_token_id,
s.scope_id,
s.certificate,
s.expiration_time,
s.connection_limit,
s.tofu_token,
s.key_id,
s.termination_reason,
s.version,
s.create_time,
s.update_time,
s.endpoint,
s.worker_filter,
ss.state,
ss.previous_end_time,
ss.start_time,
ss.end_time
from
session s,
session_state ss
where
s.public_id = ss.session_id;
-- wt_tagpair: the "value is not null" check makes null fail the domain
-- constraint, so the domain is effectively NOT NULL; values are 1..512 chars
-- and must already be lower-cased (lower(trim(value)) = value).
create domain wt_tagpair as text
check(
value is not null
and
length(trim(value)) > 0
and
length(trim(value)) <= 512
and
lower(trim(value)) = value
);
comment on domain wt_tagpair is
'Text field with constraints for key/value pairs';
-- a server can carry many (key, value) tags; duplicates are prevented by the
-- composite primary key, and tags are removed with the server via cascade
create table server_tag (
server_id text
references server(private_id)
on delete cascade
on update cascade,
key wt_tagpair,
value wt_tagpair,
primary key(server_id, key, value)
);
`),
1002: []byte(`
-- wt_email defines a type for email which must be less than 320 chars and only
-- contain lower case values. The type is defined to allow nulls and not be
-- unique, which can be overriden as needed when used in tables.
-- NOTE(review): no check below actually enforces lower case; only length is
-- constrained — confirm whether lower-casing is enforced at the application
-- layer or the comment above is stale.
create domain wt_email as text
constraint wt_email_too_short
check (length(trim(value)) > 0)
constraint wt_email_too_long
check (length(trim(value)) < 320);
comment on domain wt_email is
'standard column for email addresses';
-- wt_full_name defines a type for a person's full name which must be less than
-- 512 chars. The type is defined to allow nulls and not be unique, which can
-- be overriden as needed when used in tables.
-- (the "as" keyword is omitted here; CREATE DOMAIN name [AS] data_type makes
-- it optional, so this is valid though inconsistent with the other domains)
create domain wt_full_name text
constraint wt_full_name_too_short
check (length(trim(value)) > 0)
constraint wt_full_name_too_long
check(length(trim(value)) <= 512); -- gotta pick some upper limit.
comment on domain wt_full_name is
'standard column for the full name of a person';
-- wt_url defines a type for URLs which must be longer than 3 chars and
-- less than 4k chars. It's defined to allow nulls, which can be overridden as
-- needed when used in tables.
create domain wt_url as text
constraint wt_url_too_short
check (length(trim(value)) > 3)
constraint wt_url_too_long
check (length(trim(value)) < 4000)
constraint wt_url_invalid_protocol
-- Anchored pattern: the previous expression 'https?:\/\/*' was unanchored and
-- applied '*' to the second slash, so it accepted values such as 'https:/'
-- or 'xhttp://host'. '^https?://' requires the value to begin with
-- http:// or https://. ('/' needs no escaping in POSIX regexes.)
check (value ~ '^https?://');
comment on domain wt_url is
'standard column for URLs';
-- wt_name defines a type for resource names that must be less than 128 chars.
-- It's defined to allow nulls.
create domain wt_name as text
constraint wt_name_too_short
check (length(trim(value)) > 0)
constraint wt_name_too_long
check (length(trim(value)) < 128);
comment on domain wt_name is
'standard column for resource names';
-- wt_description defines a type for resource descriptions that must be less
-- than 1024 chars. It's defined to allow nulls.
-- (both domains reject empty/whitespace-only strings via the too_short check)
create domain wt_description as text
constraint wt_description_too_short
check (length(trim(value)) > 0)
constraint wt_description_too_long
check (length(trim(value)) < 1024);
comment on domain wt_description is
'standard column for resource descriptions';
`),
1003: []byte(`
-- kms_oidc_key entries are DEKs for encrypting oidc entries.
create table kms_oidc_key (
private_id wt_private_id primary key,
root_key_id wt_private_id not null unique -- there can be only one oidc dek per root key
references kms_root_key(private_id)
on delete cascade
on update cascade,
create_time wt_timestamp
);
-- define the immutable fields for kms_oidc_key (all of them)
create trigger
immutable_columns
before
update on kms_oidc_key
for each row execute procedure immutable_columns('private_id', 'root_key_id', 'create_time');
-- define the value of kms_oidc_key's create_time
create trigger
default_create_time_column
before
insert on kms_oidc_key
for each row execute procedure default_create_time();
-- kms_oidc_key_version entries are version of DEK keys used to encrypt oidc
-- entries.
create table kms_oidc_key_version (
private_id wt_private_id primary key,
oidc_key_id wt_private_id not null
references kms_oidc_key(private_id)
on delete cascade
on update cascade,
root_key_version_id wt_private_id not null
references kms_root_key_version(private_id)
on delete cascade
on update cascade,
version wt_version,
key bytea not null, -- the encrypted key material itself
create_time wt_timestamp,
unique(oidc_key_id, version) -- version numbers are unique per DEK
);
-- define the immutable fields for kms_oidc_key_version (all of them)
create trigger
immutable_columns
before
update on kms_oidc_key_version
for each row execute procedure immutable_columns('private_id', 'oidc_key_id', 'root_key_version_id', 'version', 'key', 'create_time');
-- define the value of kms_oidc_key_version's create_time
create trigger
default_create_time_column
before
insert on kms_oidc_key_version
for each row execute procedure default_create_time();
-- define the value of kms_oidc_key_version's version column
create trigger
kms_version_column
before insert on kms_oidc_key_version
for each row execute procedure kms_version_column('oidc_key_id');
`),
10001: []byte(`
-- widen job_run.server_id to plain text but keep the non-empty invariant via
-- an explicit check constraint
alter table job_run
alter column server_id type text;
alter table job_run
add constraint server_id_must_not_be_empty
check(length(trim(server_id)) > 0);
`),
10002: []byte(`
-- u&'\fffe' is a Unicode-escape string literal for the single character
-- U+FFFE, the sentinel prefix marker; u&'\fffe ' is the two-character set
-- {U+FFFE, space} used with trim().
create function wt_is_sentinel(string text)
returns bool
as $$
select length(trim(leading u&'\fffe ' from string)) > 0 AND starts_with(string, u&'\fffe');
$$ language sql
immutable
returns null on null input;
comment on function wt_is_sentinel is
'wt_is_sentinel returns true if string is a sentinel value';
create domain wt_sentinel as text
constraint wt_sentinel_not_valid
check(
wt_is_sentinel(value)
or
-- also accept plain non-empty strings (non-sentinel values are allowed)
length(trim(u&'\fffe ' from value)) > 0
);
comment on domain wt_sentinel is
'A non-empty string with a Unicode prefix of U+FFFE to indicate it is a sentinel value';
create function wt_to_sentinel(string text)
returns text
as $$
-- strip any existing U+FFFE/space prefix, then prepend a single U+FFFE
select concat(u&'\fffe', trim(ltrim(string, u&'\fffe ')));
$$ language sql
immutable
returns null on null input;
comment on function wt_to_sentinel is
'wt_to_sentinel takes string and returns it as a wt_sentinel';
`),
10003: []byte(`
-- credential_store
-- base table in the subtype pattern: each subtype table maintains a shadow row
-- here via the insert/delete trigger functions below
create table credential_store (
public_id wt_public_id primary key,
scope_id wt_scope_id not null
constraint iam_scope_fkey
references iam_scope (public_id)
on delete cascade
on update cascade,
-- The order of columns is important for performance. See:
-- https://dba.stackexchange.com/questions/58970/enforcing-constraints-two-tables-away/58972#58972
-- https://dba.stackexchange.com/questions/27481/is-a-composite-index-also-good-for-queries-on-the-first-field
constraint credential_store_scope_id_public_id_uq
unique(scope_id, public_id)
);
comment on table credential_store is
'credential_store is a base table for the credential store type. '
'Each row is owned by a single scope and maps 1-to-1 to a row in one of the credential store subtype tables.';
create trigger immutable_columns before update on credential_store
for each row execute procedure immutable_columns('public_id', 'scope_id');
-- insert_credential_store_subtype() is a before insert trigger
-- function for subtypes of credential_store
create function insert_credential_store_subtype()
returns trigger
as $$
begin
insert into credential_store
(public_id, scope_id)
values
(new.public_id, new.scope_id);
return new;
end;
$$ language plpgsql;
-- delete_credential_store_subtype() is an after delete trigger
-- function for subtypes of credential_store
create function delete_credential_store_subtype()
returns trigger
as $$
begin
delete from credential_store
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- credential_library
-- base table in the subtype pattern, child of credential_store
create table credential_library (
public_id wt_public_id primary key,
store_id wt_public_id not null
constraint credential_store_fkey
references credential_store (public_id)
on delete cascade
on update cascade,
constraint credential_library_store_id_public_id_uq
unique(store_id, public_id)
);
comment on table credential_library is
'credential_library is a base table for the credential library type and a child table of credential_store. '
'Each row maps 1-to-1 to a row in one of the credential library subtype tables.';
create trigger immutable_columns before update on credential_library
for each row execute procedure immutable_columns('public_id', 'store_id');
-- insert_credential_library_subtype() is a before insert trigger
-- function for subtypes of credential_library
create function insert_credential_library_subtype()
returns trigger
as $$
begin
insert into credential_library
(public_id, store_id)
values
(new.public_id, new.store_id);
return new;
end;
$$ language plpgsql;
-- delete_credential_library_subtype() is an after delete trigger
-- function for subtypes of credential_library
create function delete_credential_library_subtype()
returns trigger
as $$
begin
delete from credential_library
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- credential
-- root base table for all credential subtypes; holds only the public_id
create table credential (
public_id wt_public_id primary key
);
comment on table credential is
'credential is a base table for the credential type. ';
create trigger immutable_columns before update on credential
for each row execute procedure immutable_columns('public_id');
-- insert_credential_subtype() is a before insert trigger
-- function for subtypes of credential
create function insert_credential_subtype()
returns trigger
as $$
begin
insert into credential
(public_id)
values
(new.public_id);
return new;
end;
$$ language plpgsql;
-- delete_credential_subtype() is an after delete trigger
-- function for subtypes of credential
create function delete_credential_subtype()
returns trigger
as $$
begin
delete from credential
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- credential_static
-- subtype of credential and child of credential_store; the before-insert
-- trigger creates the parent credential row so the fkey below is satisfied
create table credential_static (
public_id wt_public_id primary key
constraint credential_fkey
references credential (public_id)
on delete cascade
on update cascade,
store_id wt_public_id not null
constraint credential_store_fkey
references credential_store (public_id)
on delete cascade
on update cascade,
constraint credential_static_store_id_public_id_uq
unique(store_id, public_id)
);
comment on table credential_static is
'credential_static is a base table for the credential static type. '
'It is a credential subtype and a child table of credential_store. ';
create trigger immutable_columns before update on credential_static
for each row execute procedure immutable_columns('public_id', 'store_id');
create trigger insert_credential_subtype before insert on credential_static
for each row execute procedure insert_credential_subtype();
create trigger delete_credential_subtype after delete on credential_static
for each row execute procedure delete_credential_subtype();
-- insert_credential_static_subtype() is a before insert trigger
-- function for subtypes of credential_static
create function insert_credential_static_subtype()
returns trigger
as $$
begin
insert into credential_static
(public_id, store_id)
values
(new.public_id, new.store_id);
return new;
end;
$$ language plpgsql;
-- delete_credential_static_subtype() is an after delete trigger
-- function for subtypes of credential_static
create function delete_credential_static_subtype()
returns trigger
as $$
begin
delete from credential_static
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- credential_dynamic
-- subtype of credential and child of credential_library; the before-insert
-- trigger creates the parent credential row so the fkey below is satisfied
create table credential_dynamic (
public_id wt_public_id primary key
constraint credential_fkey
references credential (public_id)
on delete cascade
on update cascade,
library_id wt_public_id not null
constraint credential_library_fkey
references credential_library (public_id)
on delete cascade
on update cascade,
constraint credential_dynamic_library_id_public_id_uq
unique(library_id, public_id)
);
comment on table credential_dynamic is
'credential_dynamic is a base table for the credential dynamic type. '
'It is a credential subtype and a child table of credential_library. ';
create trigger immutable_columns before update on credential_dynamic
for each row execute procedure immutable_columns('public_id', 'library_id');
create trigger insert_credential_subtype before insert on credential_dynamic
for each row execute procedure insert_credential_subtype();
create trigger delete_credential_subtype after delete on credential_dynamic
for each row execute procedure delete_credential_subtype();
-- insert_credential_dynamic_subtype() is a before insert trigger
-- function for subtypes of credential_dynamic
create function insert_credential_dynamic_subtype()
returns trigger
as $$
begin
insert into credential_dynamic
(public_id, library_id)
values
(new.public_id, new.library_id);
return new;
end;
$$ language plpgsql;
-- delete_credential_dynamic_subtype() is an after delete trigger
-- function for subtypes of credential_dynamic
create function delete_credential_dynamic_subtype()
returns trigger
as $$
begin
delete from credential_dynamic
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
-- enumeration table: the check constraint plus seed rows below make this the
-- authoritative list of valid credential purposes for fkey references
create table credential_purpose_enm (
name text primary key
constraint only_predefined_credential_purposes_allowed
check (
name in (
'application',
'ingress',
'egress'
)
)
);
comment on table credential_purpose_enm is
'credential_purpose_enm is an enumeration table for credential purposes. '
'It contains rows for representing the application, egress, and ingress credential purposes.';
insert into credential_purpose_enm (name)
values
('application'),
('ingress'),
('egress');
`),
10004: []byte(`
-- credential_vault_store: the Vault subtype of credential_store; rows are
-- soft deleted via delete_time (see the soft-delete triggers below)
create table credential_vault_store (
public_id wt_public_id primary key,
scope_id wt_scope_id not null
constraint iam_scope_fkey
references iam_scope (public_id)
on delete cascade
on update cascade,
name wt_name,
description wt_description,
create_time wt_timestamp,
update_time wt_timestamp,
-- delete_time is set to indicate the row has been soft deleted
delete_time timestamp with time zone,
version wt_version,
vault_address wt_url not null,
-- the remaining text columns can be null but if they are not null, they
-- cannot contain an empty string
namespace text
constraint namespace_must_not_be_empty
check(length(trim(namespace)) > 0),
ca_cert bytea -- PEM encoded certificate bundle
constraint ca_cert_must_not_be_empty
check(length(ca_cert) > 0),
tls_server_name text
constraint tls_server_name_must_not_be_empty
check(length(trim(tls_server_name)) > 0),
tls_skip_verify boolean default false not null,
constraint credential_store_fkey
foreign key (scope_id, public_id)
references credential_store (scope_id, public_id)
on delete cascade
on update cascade,
constraint credential_vault_store_scope_id_name_uq
unique(scope_id, name)
);
comment on table credential_vault_store is
'credential_vault_store is a table where each row is a resource that represents a vault credential store. '
'It is a credential_store subtype.';
create trigger update_version_column after update on credential_vault_store
for each row execute procedure update_version_column();
create trigger update_time_column before update on credential_vault_store
for each row execute procedure update_time_column();
create trigger default_create_time_column before insert on credential_vault_store
for each row execute procedure default_create_time();
create trigger immutable_columns before update on credential_vault_store
for each row execute procedure immutable_columns('public_id', 'scope_id','create_time');
create trigger insert_credential_store_subtype before insert on credential_vault_store
for each row execute procedure insert_credential_store_subtype();
create trigger delete_credential_store_subtype after delete on credential_vault_store
for each row execute procedure delete_credential_store_subtype();
-- before_soft_delete_credential_vault_store is a before update trigger for
-- the credential_vault_store table that makes the delete_time column a
-- set once column. Once the delete_time column is set to a value other than
-- null, it cannot be changed. If the current delete_time of a row is not null
-- and an update contains a value for delete_time different from the current
-- value, this trigger will raise an error with error code 23602 which is a
-- class 23 integrity constraint violation: set_once_violation.
create function before_soft_delete_credential_vault_store()
returns trigger
as $$
begin
-- "is distinct from" treats nulls as comparable, so null -> non-null counts
-- as a change
if new.delete_time is distinct from old.delete_time then
if old.delete_time is not null then
raise exception 'set_once_violation: %.%', tg_table_name, 'delete_time' using
errcode = '23602',
schema = tg_table_schema,
table = tg_table_name,
column = 'delete_time';
end if;
end if;
return new;
end;
$$ language plpgsql;
create trigger before_soft_delete_credential_vault_store before update on credential_vault_store
for each row execute procedure before_soft_delete_credential_vault_store();
-- after_soft_delete_credential_vault_store is an after update trigger for the
-- credential_vault_store table that performs cleanup actions when a
-- credential store is soft deleted. A credential store is considered "soft
-- deleted" if the delete_time for the row is updated from null to not null.
--
-- When a credential store is soft deleted, this trigger:
-- * marks any active Vault tokens owned by the credential store for revocation
-- * deletes any credential library owned by the credential store
create function after_soft_delete_credential_vault_store()
returns trigger
as $$
begin
-- only fire the cleanup on the null -> not-null transition; the companion
-- before trigger guarantees delete_time is set at most once
if new.delete_time is distinct from old.delete_time then
if old.delete_time is null then
-- mark current and maintaining tokens as revoke
update credential_vault_token
set status = 'revoke'
where store_id = new.public_id
and status in ('current', 'maintaining');
-- delete the store's libraries
delete
from credential_vault_library
where store_id = new.public_id;
end if;
end if;
return null;
end;
$$ language plpgsql;
create trigger after_soft_delete_credential_vault_store after update on credential_vault_store
for each row execute procedure after_soft_delete_credential_vault_store();
-- enumeration table for the vault token lifecycle states
create table credential_vault_token_status_enm (
name text primary key
constraint only_predefined_token_statuses_allowed
check (
name in (
'current',
'maintaining',
'revoke',
'revoked',
'expired'
)
)
);
comment on table credential_vault_token_status_enm is
'credential_vault_token_status_enm is an enumeration table for the status of vault tokens. '
'It contains rows for representing the current, maintaining, revoke, revoked, and expired statuses.';
insert into credential_vault_token_status_enm (name)
values
('current'),
('maintaining'),
('revoke'),
('revoked'),
('expired');
create table credential_vault_token (
token_hmac bytea primary key, -- hmac-sha256(token, key(blake2b-256(token_accessor))
token bytea not null, -- encrypted value
store_id wt_public_id not null
constraint credential_vault_store_fkey
references credential_vault_store (public_id)
on delete cascade
on update cascade,
create_time wt_timestamp,
update_time wt_timestamp,
last_renewal_time timestamp with time zone not null,
expiration_time timestamp with time zone not null
constraint last_renewal_time_must_be_before_expiration_time
check(last_renewal_time < expiration_time),
key_id text not null
constraint kms_database_key_version_fkey
references kms_database_key_version (private_id)
on delete restrict
on update cascade,
status text not null
constraint credential_vault_token_status_enm_fkey
references credential_vault_token_status_enm (name)
on delete restrict
on update cascade
);
comment on table credential_vault_token is
'credential_vault_token is a table where each row contains a Vault token for one Vault credential store. '
'A credential_store can have only one vault token with the status of current';
comment on column credential_vault_token.token_hmac is
'token_hmac contains the hmac-sha256 value of the token. '
'The hmac key is the blake2b-256 value of the token accessor.';
-- https://www.postgresql.org/docs/current/indexes-partial.html
-- the partial unique index enforces "at most one current token per store"
create unique index credential_vault_token_current_status_constraint
on credential_vault_token (store_id)
where status = 'current';
create index credential_vault_token_expiration_time_ix
on credential_vault_token(expiration_time);
comment on index credential_vault_token_expiration_time_ix is
'the credential_vault_token_expiration_time_ix is used by the token renewal job';
create trigger update_time_column before update on credential_vault_token
for each row execute procedure update_time_column();
create trigger default_create_time_column before insert on credential_vault_token
for each row execute procedure default_create_time();
create trigger immutable_columns before update on credential_vault_token
for each row execute procedure immutable_columns('token_hmac', 'token', 'store_id','create_time');
-- insert_credential_vault_token() is a before insert trigger
-- function for credential_vault_token that changes the status of the current
-- token to 'maintaining'
create function insert_credential_vault_token()
returns trigger
as $$
begin
-- demote the store's existing 'current' token before the new one is
-- inserted, keeping the partial unique index above satisfied
update credential_vault_token
set status = 'maintaining'
where store_id = new.store_id
and status = 'current';
return new;
end;
$$ language plpgsql;
create trigger insert_credential_vault_token before insert on credential_vault_token
for each row execute procedure insert_credential_vault_token();
-- optional 1-to-0..1 child of credential_vault_store holding the mTLS client
-- certificate; store_id as primary key enforces at most one per store
create table credential_vault_client_certificate (
store_id wt_public_id primary key
constraint credential_vault_store_fkey
references credential_vault_store (public_id)
on delete cascade
on update cascade,
certificate bytea not null -- PEM encoded certificate
constraint certificate_must_not_be_empty
check(length(certificate) > 0),
certificate_key bytea not null -- encrypted PEM encoded private key for certificate
constraint certificate_key_must_not_be_empty
check(length(certificate_key) > 0),
certificate_key_hmac bytea not null
constraint certificate_key_hmac_must_not_be_empty
check(length(certificate_key_hmac) > 0),
key_id text not null
constraint kms_database_key_version_fkey
references kms_database_key_version (private_id)
on delete restrict
on update cascade
);
comment on table credential_vault_client_certificate is
'credential_vault_client_certificate is a table where each row contains a client certificate that a credential_vault_store uses for mTLS when connecting to Vault. '
'A credential_vault_store can have 0 or 1 client certificates.';
create trigger immutable_columns before update on credential_vault_client_certificate
for each row execute procedure immutable_columns('store_id');
-- enumeration table for the HTTP methods a vault library may use
create table credential_vault_http_method_enm (
name text primary key
constraint only_predefined_http_methods_allowed
check (
name in (
'GET',
'POST'
)
)
);
comment on table credential_vault_http_method_enm is
'credential_vault_http_method_enm is an enumeration table for the http method used by a vault library when communicating with vault. '
'It contains rows for representing the HTTP GET and the HTTP POST methods.';
insert into credential_vault_http_method_enm (name)
values
('GET'),
('POST');
create table credential_vault_library (
public_id wt_public_id primary key,
store_id wt_public_id not null
constraint credential_vault_store_fkey
references credential_vault_store (public_id)
on delete cascade
on update cascade,
name wt_name,
description wt_description,
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
vault_path text not null
constraint vault_path_must_not_be_empty
check(length(trim(vault_path)) > 0),
http_method text not null
constraint credential_vault_http_method_enm_fkey
references credential_vault_http_method_enm (name)
on delete restrict
on update cascade,
-- a request body is only meaningful for POST requests; the constraint also
-- rejects empty bodies
http_request_body bytea
constraint http_request_body_only_allowed_with_post_method
check(
http_request_body is null
or
(
http_method = 'POST'
and
length(http_request_body) > 0
)
),
constraint credential_vault_library_store_id_name_uq
unique(store_id, name),
constraint credential_library_fkey
foreign key (store_id, public_id)
references credential_library (store_id, public_id)
on delete cascade
on update cascade,
constraint credential_vault_library_store_id_public_id_uq
unique(store_id, public_id)
);
comment on table credential_vault_library is
'credential_vault_library is a table where each row is a resource that represents a vault credential library. '
'It is a credential_library subtype and a child table of credential_vault_store.';
create trigger update_version_column after update on credential_vault_library
for each row execute procedure update_version_column();
create trigger update_time_column before update on credential_vault_library
for each row execute procedure update_time_column();
create trigger default_create_time_column before insert on credential_vault_library
for each row execute procedure default_create_time();
create trigger immutable_columns before update on credential_vault_library
for each row execute procedure immutable_columns('public_id', 'store_id','create_time');
create trigger insert_credential_library_subtype before insert on credential_vault_library
for each row execute procedure insert_credential_library_subtype();
create trigger delete_credential_library_subtype after delete on credential_vault_library
for each row execute procedure delete_credential_library_subtype();
-- before_insert_credential_vault_library is a before insert trigger for
-- the credential_vault_library table that prevents a library from being
-- inserted for a soft deleted credential store.
create function before_insert_credential_vault_library()
returns trigger
as $$
declare
delete_time_val timestamp with time zone;
begin
select delete_time into delete_time_val
from credential_vault_store
where public_id = new.store_id;
-- a non-null delete_time means the store is soft deleted; report it with
-- the standard foreign_key_violation error code (23503)
if delete_time_val is not null then
raise exception 'foreign_key_violation: %.%', tg_table_name, 'store_id' using
errcode = '23503',
schema = tg_table_schema,
table = tg_table_name,
column = 'store_id';
end if;
return new;
end;
$$ language plpgsql;
create trigger before_insert_credential_vault_library before insert on credential_vault_library
for each row execute procedure before_insert_credential_vault_library();
-- credential_vault_credential_status_enm is a closed set of lifecycle states
-- for a vault credential; the check constraint makes the set immutable short
-- of a migration.
create table credential_vault_credential_status_enm (
name text primary key
constraint only_predefined_credential_statuses_allowed
check (
name in (
'active',
'revoke',
'revoked',
'expired',
'unknown'
)
)
);
comment on table credential_vault_credential_status_enm is
'credential_vault_credential_status_enm is an enumeration table for the status of vault credentials. '
'It contains rows for representing the active, revoke, revoked, expired, and unknown statuses.';
insert into credential_vault_credential_status_enm (name)
values
('active'),
('revoke'),
('revoked'),
('expired'),
('unknown');
create table credential_vault_credential (
public_id wt_public_id primary key,
-- set null (not cascade) so the lease row survives library deletion; the
-- revocation machinery still needs it
library_id wt_public_id
constraint credential_vault_library_fkey
references credential_vault_library (public_id)
on delete set null
on update cascade,
-- set null on session delete; update_credential_status_column below flips
-- the status to 'revoke' when that happens
session_id wt_public_id
constraint session_fkey
references session (public_id)
on delete set null
on update cascade,
token_hmac bytea not null
constraint credential_vault_token_fkey
references credential_vault_token (token_hmac)
on delete cascade
on update cascade,
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
external_id wt_sentinel not null,
last_renewal_time timestamp with time zone not null,
expiration_time timestamp with time zone not null
constraint last_renewal_time_must_be_before_expiration_time
check(last_renewal_time < expiration_time),
is_renewable boolean not null,
status text not null
constraint credential_vault_credential_status_enm_fkey
references credential_vault_credential_status_enm (name)
on delete restrict
on update cascade,
constraint credential_dynamic_fkey
foreign key (library_id, public_id)
references credential_dynamic (library_id, public_id)
on delete cascade
on update cascade,
constraint credential_vault_credential_library_id_public_id_uq
unique(library_id, public_id)
);
comment on table credential_vault_credential is
'credential_vault_credential is a table where each row contains the lease information for a single Vault secret retrieved from a vault credential library for a session.';
create trigger update_version_column after update on credential_vault_credential
for each row execute procedure update_version_column();
create trigger update_time_column before update on credential_vault_credential
for each row execute procedure update_time_column();
-- update_credential_status_column() is a before update trigger function for
-- credential_vault_credential that changes the status of the credential to 'revoke' if
-- the session_id is updated to null
create function update_credential_status_column()
returns trigger
as $$
begin
if new.session_id is distinct from old.session_id then
-- only an 'active' credential transitions to 'revoke'; other states are
-- left untouched
if new.session_id is null and old.status = 'active' then
new.status = 'revoke';
end if;
end if;
return new;
end;
$$ language plpgsql;
create trigger update_credential_status_column before update on credential_vault_credential
for each row execute procedure update_credential_status_column();
-- not_null_columns() will make the column names not null which are passed as
-- parameters when the trigger is created. It raises error code 23502 which is a
-- class 23 integrity constraint violation: not null column
create function not_null_columns()
returns trigger
as $$
declare
col_name text;
new_value text;
begin
-- tg_argv carries the column names given at create trigger time; %I quotes
-- each as an identifier so odd column names are handled safely
foreach col_name in array tg_argv loop
execute format('SELECT $1.%I', col_name) into new_value using new;
if new_value is null then
raise exception 'not null column: %.%', tg_table_name, col_name using
errcode = '23502',
schema = tg_table_schema,
table = tg_table_name,
column = col_name;
end if;
end loop;
return new;
end;
$$ language plpgsql;
comment on function not_null_columns() is
'function used in before insert triggers to make columns not null on insert, but are allowed be updated to null';
-- library_id and session_id must be set at insert time but may later be
-- nulled by the FK set-null actions, hence this trigger instead of NOT NULL.
create trigger not_null_columns before insert on credential_vault_credential
for each row execute procedure not_null_columns('library_id', 'session_id');
create trigger default_create_time_column before insert on credential_vault_credential
for each row execute procedure default_create_time();
create trigger immutable_columns before update on credential_vault_credential
for each row execute procedure immutable_columns('external_id', 'create_time');
create trigger insert_credential_dynamic_subtype before insert on credential_vault_credential
for each row execute procedure insert_credential_dynamic_subtype();
create trigger delete_credential_dynamic_subtype after delete on credential_vault_credential
for each row execute procedure delete_credential_dynamic_subtype();
create index credential_vault_credential_expiration_time_ix
on credential_vault_credential(expiration_time);
comment on index credential_vault_credential_expiration_time_ix is
'the credential_vault_credential_expiration_time_ix is used by the credential renewal job';
-- register oplog tickets so writes to these tables can be tracked in the oplog
insert into oplog_ticket (name, version)
values
('credential_vault_store', 1),
('credential_vault_library', 1),
('credential_vault_credential', 1) ;
create view credential_vault_store_private as
with
-- tokens still usable for issuing or renewing/revoking credentials; soft
-- deleted/expired statuses are excluded
active_tokens as (
select token_hmac,
token, -- encrypted
store_id,
create_time,
update_time,
last_renewal_time,
expiration_time,
-- renewal time is the midpoint between the last renewal time and the expiration time
last_renewal_time + (expiration_time - last_renewal_time) / 2 as renewal_time,
key_id,
status
from credential_vault_token
where status in ('current', 'maintaining', 'revoke')
)
select store.public_id as public_id,
store.scope_id as scope_id,
store.name as name,
store.description as description,
store.create_time as create_time,
store.update_time as update_time,
store.delete_time as delete_time,
store.version as version,
store.vault_address as vault_address,
store.namespace as namespace,
store.ca_cert as ca_cert,
store.tls_server_name as tls_server_name,
store.tls_skip_verify as tls_skip_verify,
store.public_id as store_id,
token.token_hmac as token_hmac,
token.token as ct_token, -- encrypted
token.create_time as token_create_time,
token.update_time as token_update_time,
token.last_renewal_time as token_last_renewal_time,
token.expiration_time as token_expiration_time,
token.renewal_time as token_renewal_time,
token.key_id as token_key_id,
token.status as token_status,
cert.certificate as client_cert,
cert.certificate_key as ct_client_key, -- encrypted
cert.certificate_key_hmac as client_cert_key_hmac,
cert.key_id as client_key_id
-- left joins: a store row is returned even when it has no active token or
-- no client certificate configured
from credential_vault_store store
left join active_tokens token
on store.public_id = token.store_id
left join credential_vault_client_certificate cert
on store.public_id = cert.store_id;
comment on view credential_vault_store_private is
'credential_vault_store_private is a view where each row contains a credential store and the credential store''s data needed to connect to Vault. '
'The view returns a separate row for each current, maintaining and revoke token; maintaining tokens should only be used for token/credential renewal and revocation. '
'Each row may contain encrypted data. This view should not be used to retrieve data which will be returned external to boundary.';
-- Public projection of credential_vault_store_private: only non-encrypted
-- columns, only stores with a 'current' token, and no soft deleted stores.
create view credential_vault_store_public as
select public_id,
scope_id,
name,
description,
create_time,
update_time,
version,
vault_address,
namespace,
ca_cert,
tls_server_name,
tls_skip_verify,
token_hmac,
token_create_time,
token_update_time,
token_last_renewal_time,
token_expiration_time,
client_cert,
client_cert_key_hmac
from credential_vault_store_private
where token_status = 'current'
and delete_time is null;
comment on view credential_vault_store_public is
'credential_vault_store_public is a view where each row contains a credential store. '
'No encrypted data is returned. This view can be used to retrieve data which will be returned external to boundary.';
-- Joins each vault library to its store's connection data; the inner join on
-- token_status = 'current' means libraries whose store has no current token
-- do not appear.
create view credential_vault_library_private as
select library.public_id as public_id,
library.store_id as store_id,
library.name as name,
library.description as description,
library.create_time as create_time,
library.update_time as update_time,
library.version as version,
library.vault_path as vault_path,
library.http_method as http_method,
library.http_request_body as http_request_body,
store.scope_id as scope_id,
store.vault_address as vault_address,
store.namespace as namespace,
store.ca_cert as ca_cert,
store.tls_server_name as tls_server_name,
store.tls_skip_verify as tls_skip_verify,
store.token_hmac as token_hmac,
store.ct_token as ct_token, -- encrypted
store.token_key_id as token_key_id,
store.client_cert as client_cert,
store.ct_client_key as ct_client_key, -- encrypted
store.client_key_id as client_key_id
from credential_vault_library library
join credential_vault_store_private store
on library.store_id = store.public_id
and store.token_status = 'current';
comment on view credential_vault_library_private is
'credential_vault_library_private is a view where each row contains a credential library and the credential library''s data needed to connect to Vault. '
'Each row may contain encrypted data. This view should not be used to retrieve data which will be returned external to boundary.';
create view credential_vault_credential_private as
select credential.public_id as public_id,
credential.library_id as library_id,
credential.session_id as session_id,
credential.create_time as create_time,
credential.update_time as update_time,
credential.version as version,
credential.external_id as external_id,
credential.last_renewal_time as last_renewal_time,
credential.expiration_time as expiration_time,
credential.is_renewable as is_renewable,
credential.status as status,
-- renewal time is the midpoint between last renewal and expiration, same
-- convention as credential_vault_store_private
credential.last_renewal_time + (credential.expiration_time - credential.last_renewal_time) / 2 as renewal_time,
token.token_hmac as token_hmac,
token.token as ct_token, -- encrypted
token.create_time as token_create_time,
token.update_time as token_update_time,
token.last_renewal_time as token_last_renewal_time,
token.expiration_time as token_expiration_time,
token.key_id as token_key_id,
token.status as token_status,
store.scope_id as scope_id,
store.vault_address as vault_address,
store.namespace as namespace,
store.ca_cert as ca_cert,
store.tls_server_name as tls_server_name,
store.tls_skip_verify as tls_skip_verify,
cert.certificate as client_cert,
cert.certificate_key as ct_client_key, -- encrypted
cert.certificate_key_hmac as client_cert_key_hmac,
cert.key_id as client_key_id
from credential_vault_credential credential
join credential_vault_token token
on credential.token_hmac = token.token_hmac
join credential_vault_store store
on token.store_id = store.public_id
left join credential_vault_client_certificate cert
on store.public_id = cert.store_id
-- rows with an 'infinity' expiration are excluded from renewal/revocation
where credential.expiration_time != 'infinity'::date;
comment on view credential_vault_credential_private is
'credential_vault_credential_private is a view where each row contains a credential, '
'the vault token used to issue the credential, and the credential store data needed to connect to Vault. '
'Each row may contain encrypted data. This view should not be used to retrieve data which will be returned external to boundary.';
`),
10005: []byte(`
-- Join table linking a target to a credential library for a given purpose;
-- the three columns together form the primary key, so each (target, library,
-- purpose) triple is unique.
create table target_credential_library (
target_id wt_public_id not null
constraint target_fkey
references target (public_id)
on delete cascade
on update cascade,
credential_library_id wt_public_id not null
constraint credential_library_fkey
references credential_library (public_id)
on delete cascade
on update cascade,
credential_purpose text not null
constraint credential_purpose_enm_fkey
references credential_purpose_enm (name)
on delete restrict
on update cascade,
create_time wt_timestamp,
primary key(target_id, credential_library_id, credential_purpose)
);
comment on table target_credential_library is
'target_credential_library is a join table between the target and credential_library tables. '
'It also contains the credential purpose the relationship represents.';
create trigger default_create_time_column before insert on target_credential_library
for each row execute procedure default_create_time();
-- all columns are immutable; a change of purpose or library is modeled as
-- delete + insert
create trigger immutable_columns before update on target_credential_library
for each row execute procedure immutable_columns('target_id', 'credential_library_id', 'credential_purpose', 'create_time');
-- target_library provides the store id along with the other data stored in
-- target_credential_library.
-- Uses an explicit ANSI inner join (instead of the implicit comma-join form)
-- so the join condition cannot be silently dropped; the result set is
-- identical to the previous definition.
create view target_library
as
select
tcl.target_id,
tcl.credential_library_id,
tcl.credential_purpose,
cl.store_id
from
target_credential_library tcl
inner join credential_library cl
on cl.public_id = tcl.credential_library_id;
`),
10006: []byte(`
-- Join table linking a session to the dynamic credentials issued for it,
-- keyed by (session, library, purpose). credential_id is filled in once a
-- credential is actually retrieved and must be unique across sessions.
create table session_credential_dynamic (
session_id wt_public_id not null
constraint session_fkey
references session (public_id)
on delete cascade
on update cascade,
library_id wt_public_id not null
constraint credential_library_fkey
references credential_library (public_id)
on delete cascade
on update cascade,
credential_id wt_public_id
constraint credential_dynamic_fkey
references credential_dynamic (public_id)
on delete cascade
on update cascade,
credential_purpose text not null
constraint credential_purpose_fkey
references credential_purpose_enm (name)
on delete restrict
on update cascade,
primary key(session_id, library_id, credential_purpose),
create_time wt_timestamp,
constraint session_credential_dynamic_credential_id_uq
unique(credential_id)
);
comment on table session_credential_dynamic is
'session_credential_dynamic is a join table between the session and dynamic credential tables. '
'It also contains the credential purpose the relationship represents.';
create trigger default_create_time_column before insert on session_credential_dynamic
for each row execute procedure default_create_time();
create trigger immutable_columns before update on session_credential_dynamic
for each row execute procedure immutable_columns('session_id', 'library_id', 'credential_purpose', 'create_time');
-- revoke_credentials revokes any active credentials for a session when the
-- session enters the canceling or terminated states.
create function revoke_credentials()
returns trigger
as $$
begin
if new.state in ('canceling', 'terminated') then
-- flipping status to 'revoke' queues the credential for the revocation job
update credential_vault_credential
set status = 'revoke'
where session_id = new.session_id
and status = 'active';
end if;
return new;
end;
$$ language plpgsql;
create trigger revoke_credentials after insert on session_state
for each row execute procedure revoke_credentials();
`),
10007: []byte(`
-- Migrate existing sentinel values to the new format: a sentinel now carries
-- both a U+FFFE prefix and a U+FFFF suffix. Append the suffix to any sentinel
-- that does not already end with it. The data fix must run before the domain
-- constraint is replaced below.
update credential_vault_credential
set external_id = concat(external_id, u&'\ffff')
where wt_is_sentinel(external_id)
and not starts_with(reverse(external_id), u&'\ffff');
-- drop the constraint before dropping the function it depends on
alter domain wt_sentinel
drop constraint wt_sentinel_not_valid;
drop function wt_is_sentinel;
create function wt_is_sentinel(string text)
returns bool
as $$
select starts_with(string, u&'\fffe') and starts_with(reverse(string), u&'\ffff');
$$ language sql
immutable
returns null on null input;
comment on function wt_is_sentinel is
'wt_is_sentinel returns true if string is a sentinel value';
alter domain wt_sentinel
add constraint wt_sentinel_not_valid
check(
wt_is_sentinel(value)
or
-- non-sentinel values must be non-empty after stripping any sentinel
-- marker characters and spaces from the ends
length(trim(trailing u&'\ffff' from trim(leading u&'\fffe ' from value))) > 0
);
comment on domain wt_sentinel is
'A non-empty string with a Unicode prefix of U+FFFE and suffix of U+FFFF to indicate it is a sentinel value';
drop function wt_to_sentinel; -- wt_to_sentinel is not needed, dropping and not re-creating
`),
11001: []byte(`
-- Enumeration of server kinds; the new FK below retrofits referential
-- integrity onto the existing server.type column.
create table server_type_enm (
name text primary key
constraint only_predefined_server_types_allowed
check (
name in (
'controller',
'worker'
)
)
);
comment on table server_type_enm is
'server_type_enm is an enumeration table for server types. '
'It contains rows for representing servers as either a controller or worker.';
insert into server_type_enm (name) values
('controller'),
('worker');
alter table server
add constraint server_type_enm_fkey
foreign key (type) references server_type_enm(name)
on update cascade
on delete restrict;
`),
12001: []byte(`
-- wt_sub_seconds returns ts minus sec seconds. stable (not immutable) is
-- used to match its companion wt_add_seconds helpers.
create function wt_sub_seconds(sec integer, ts timestamp with time zone)
returns timestamp with time zone
as $$
select ts - sec * '1 second'::interval;
$$ language sql
stable
returns null on null input;
-- Fixed: this comment previously targeted wt_add_seconds, overwriting that
-- function's comment with text describing wt_sub_seconds.
comment on function wt_sub_seconds is
'wt_sub_seconds returns ts - sec.';
create function wt_sub_seconds_from_now(sec integer)
returns timestamp with time zone
as $$
select wt_sub_seconds(sec, current_timestamp);
$$ language sql
stable
returns null on null input;
-- Fixed: this comment previously targeted wt_add_seconds_to_now.
comment on function wt_sub_seconds_from_now is
'wt_sub_seconds_from_now returns current_timestamp - sec.';
`),
13001: []byte(`
-- Add nullable columns to store the raw token and userinfo claims for an
-- oidc account; when present they must be non-empty after trimming.
alter table auth_oidc_account
add column token_claims text
constraint token_claims_must_not_be_empty
check(
length(trim(token_claims)) > 0
);
alter table auth_oidc_account
add column userinfo_claims text
constraint userinfo_claims_must_not_be_empty
check(
length(trim(userinfo_claims)) > 0
);
`),
2001: []byte(`
-- log_migration entries represent logs generated during migrations
create table log_migration(
id bigint generated always as identity primary key,
migration_version bigint not null, -- cannot declare FK since the table is truncated during runtime
create_time wt_timestamp,
entry text not null
);
-- Fixed typo in the stored comment: "databaes" -> "database".
comment on table log_migration is
'log_migration entries are logging output from database migrations';
-- log_migration triggers
create trigger
default_create_time_column
before
insert on log_migration
for each row execute procedure default_create_time();
-- every column is immutable: a log entry is written once and never edited
create trigger
immutable_columns
before
update on log_migration
for each row execute procedure immutable_columns('id', 'migration_version', 'create_time', 'entry');
-- log_migration_version() defines a function to be used in a "before insert"
-- trigger for log_migrations entries. Its intent: set the log_migration
-- version column to the current migration version.
create or replace function
log_migration_version()
returns trigger
as $$
declare current_version bigint;
begin
select max(version) from boundary_schema_version into current_version;
new.migration_version = current_version;
return new;
end;
$$ language plpgsql;
comment on function log_migration_version() is
'log_migration_version will set the log_migration entries to the current migration version';
create trigger
migration_version_column
before
insert on log_migration
for each row execute procedure log_migration_version();
`),
2003: []byte(`
-- auth_oidc_method_state_enum entries define the possible oidc auth method
-- states.
create table auth_oidc_method_state_enm (
name text primary key
constraint name_only_predefined_oidc_method_states_allowed
check (
name in ('inactive', 'active-private', 'active-public')
)
);
-- populate the values of auth_oidc_method_state_enm
insert into auth_oidc_method_state_enm(name)
values
('inactive'),
('active-private'),
('active-public');
-- define the immutable fields for auth_oidc_method_state_enm (all of them)
create trigger
immutable_columns
before
update on auth_oidc_method_state_enm
for each row execute procedure immutable_columns('name');
-- auth_oidc_signing_alg entries define the supported oidc auth method
-- signing algorithms.
create table auth_oidc_signing_alg_enm (
name text primary key
constraint only_predefined_auth_oidc_signing_algs_allowed
check (
name in (
'RS256',
'RS384',
'RS512',
'ES256',
'ES384',
'ES512',
'PS256',
'PS384',
'PS512',
'EdDSA')
)
);
-- populate the values of auth_oidc_signing_alg
insert into auth_oidc_signing_alg_enm (name)
values
('RS256'),
('RS384'),
('RS512'),
('ES256'),
('ES384'),
('ES512'),
('PS256'),
('PS384'),
('PS512'),
('EdDSA')
;
-- define the immutable fields for auth_oidc_signing_alg (all of them)
create trigger
immutable_columns
before
update on auth_oidc_signing_alg_enm
for each row execute procedure immutable_columns('name');
`),
2004: []byte(`
-- auth_oidc_method entries are the current oidc auth methods configured for
-- existing scopes.
create table auth_oidc_method (
public_id wt_public_id
primary key,
scope_id wt_scope_id
not null,
name wt_name,
description wt_description,
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
-- state is constrained to the auth_oidc_method_state_enm values:
-- inactive, active-private, active-public
state text not null
constraint auth_oidc_method_state_enm_fkey
references auth_oidc_method_state_enm(name)
on delete restrict
on update cascade,
disable_discovered_config_validation bool not null default false,
api_url wt_url, -- an address prefix at which the boundary api is reachable.
issuer wt_url,
client_id text -- oidc client identifier issued by the oidc provider.
constraint client_id_not_empty
check(length(trim(client_id)) > 0),
client_secret bytea, -- encrypted oidc client secret issued by the oidc provider.
client_secret_hmac text
constraint client_secret_hmac_not_empty
check(length(trim(client_secret_hmac)) > 0),
key_id wt_private_id not null -- key used to encrypt entries via wrapping wrapper.
constraint kms_database_key_version_fkey
references kms_database_key_version(private_id)
on delete restrict
on update cascade,
constraint key_id_not_empty
check(length(trim(key_id)) > 0),
max_age int -- the allowable elapsed time in secs since the last time the user was authenticated. A value -1 basically forces the IdP to re-authenticate the End-User. Zero is not a valid value.
constraint max_age_not_equal_zero
check(max_age != 0)
constraint max_age_not_less_then_negative_one
check(max_age >= -1),
constraint auth_method_fkey
foreign key (scope_id, public_id)
references auth_method (scope_id, public_id)
on delete cascade
on update cascade,
constraint auth_oidc_method_scope_id_name_uq
unique(scope_id, name),
constraint auth_oidc_method_scope_id_public_id_uq
unique(scope_id, public_id),
constraint auth_oidc_method_scope_id_issuer_client_id_unique
unique(scope_id, issuer, client_id) -- a client_id must be unique for a provider within a scope.
);
comment on table auth_oidc_method is
'auth_oidc_method entries are the current oidc auth methods configured for existing scopes.';
-- auth_oidc_signing_alg entries are the signing algorithms allowed for an oidc
-- auth method. There must be at least one allowed alg for each oidc auth method.
create table auth_oidc_signing_alg (
create_time wt_timestamp,
oidc_method_id wt_public_id
constraint auth_oidc_method_fkey
references auth_oidc_method(public_id)
on delete cascade
on update cascade,
signing_alg_name text
constraint auth_oidc_signing_alg_enm_fkey
references auth_oidc_signing_alg_enm(name)
on delete restrict
on update cascade,
primary key(oidc_method_id, signing_alg_name)
);
comment on table auth_oidc_signing_alg is
'auth_oidc_signing_alg entries are the signing algorithms allowed for an oidc auth method. There must be at least one allowed alg for each oidc auth method';
-- auth_oidc_aud_claim entries are the audience claims for a specific oidc auth
-- method. There can be 0 or more for each parent oidc auth method. If an auth
-- method has any aud claims, an ID token must contain one of them to be valid.
create table auth_oidc_aud_claim (
create_time wt_timestamp,
oidc_method_id wt_public_id
constraint auth_oidc_method_fkey
references auth_oidc_method(public_id)
on delete cascade
on update cascade,
aud_claim text not null
constraint aud_claim_must_not_be_empty
check(length(trim(aud_claim)) > 0)
constraint aud_claim_must_be_less_than_1024_chars
check(length(trim(aud_claim)) < 1024),
primary key(oidc_method_id, aud_claim)
);
comment on table auth_oidc_aud_claim is
'auth_oidc_aud_claim entries are the audience claims for a specific oidc auth method. There can be 0 or more for each parent oidc auth method. If an auth method has any aud claims, an ID token must contain one of them to be valid.';
-- auth_oidc_certificate entries are optional PEM encoded x509 certificates.
-- Each entry is a single certificate. An oidc auth method may have 0 or more
-- of these optional x509s. If an auth method has any cert entries, they are
-- used as trust anchors when connecting to the auth method's oidc provider
-- (instead of the host system's cert chain).
create table auth_oidc_certificate (
create_time wt_timestamp,
oidc_method_id wt_public_id
constraint auth_oidc_method_fkey
references auth_oidc_method(public_id)
on delete cascade
on update cascade,
certificate bytea not null,
primary key(oidc_method_id, certificate)
);
comment on table auth_oidc_certificate is
'auth_oidc_certificate entries are optional PEM encoded x509 certificates. Each entry is a single certificate. An oidc auth method may have 0 or more of these optional x509s. If an auth method has any cert entries, they are used as trust anchors when connecting to the auth methods oidc provider (instead of the host system cert chain)';
-- auth_oidc_account entries are subtypes of auth_account and represent an
-- oidc account.
create table auth_oidc_account (
public_id wt_public_id
primary key,
auth_method_id wt_public_id
not null,
-- NOTE(mgaffney): The scope_id type is not wt_scope_id because the domain
-- check is executed before the insert trigger which retrieves the scope_id
-- causing an insert to fail.
scope_id text not null,
name wt_name,
description wt_description,
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
issuer wt_url not null, -- case-sensitive URL that maps to an id_token's iss claim,
subject text not null -- case-sensitive string that maps to an id_token's sub claim
constraint subject_must_not_be_empty
check (
length(trim(subject)) > 0
)
constraint subject_must_be_less_than_256_chars
check(
length(trim(subject)) <= 255 -- length limit per OIDC spec
),
full_name wt_full_name, -- may be null and maps to an id_token's name claim
email wt_email, -- may be null and maps to the id_token's email claim
constraint auth_oidc_method_fkey
foreign key (scope_id, auth_method_id)
references auth_oidc_method (scope_id, public_id)
on delete cascade
on update cascade,
constraint auth_account_fkey
foreign key (scope_id, auth_method_id, public_id)
references auth_account (scope_id, auth_method_id, public_id)
on delete cascade
on update cascade,
constraint auth_oidc_account_auth_method_id_name_uq
unique(auth_method_id, name),
-- ###############################################################
-- any change to this constraints name must be aligned with the
-- acctUpsertQuery const in internal/auth/oidc/query.go
-- ###############################################################
constraint auth_oidc_account_auth_method_id_issuer_subject_uq
unique(auth_method_id, issuer, subject), -- subject must be unique for a provider within specific auth method
constraint auth_oidc_account_auth_method_id_public_id_uq
unique(auth_method_id, public_id)
);
-- Fixed: this comment previously targeted auth_oidc_method, overwriting the
-- method table's comment with text describing the account table.
comment on table auth_oidc_account is
'auth_oidc_account entries are subtypes of auth_account and represent an oidc account.';
-- auth_oidc_method column triggers: base-table subtype insert, update_time
-- maintenance, immutable columns, default create_time, and version bumping.
create trigger
insert_auth_method_subtype
before insert on auth_oidc_method
for each row execute procedure insert_auth_method_subtype();
create trigger
update_time_column
before
update on auth_oidc_method
for each row execute procedure update_time_column();
create trigger
immutable_columns
before
update on auth_oidc_method
for each row execute procedure immutable_columns('public_id', 'scope_id', 'create_time');
create trigger
default_create_time_column
before
insert on auth_oidc_method
for each row execute procedure default_create_time();
create trigger
update_version_column
after update on auth_oidc_method
for each row execute procedure update_version_column();
-- active_auth_oidc_method_must_be_complete() defines a function to be used in
-- a "before update" trigger for auth_oidc_method entries. Its intent: prevent
-- incomplete oidc methods from transitioning out of the "inactive" state.
create or replace function
active_auth_oidc_method_must_be_complete()
returns trigger
as $$
begin
-- validate signing alg
-- all checks only apply when leaving the 'inactive' state
if old.state = 'inactive' and new.state != 'inactive' then
-- perform with no select list: we only care whether a signing alg row
-- exists for this method (checked via "found" below)
perform
from
auth_oidc_method am
join auth_oidc_signing_alg alg on am.public_id = alg.oidc_method_id
where
new.public_id = am.public_id;
if not found then
raise exception 'an incomplete oidc auth method must remain inactive';
end if;
-- validate issuer
-- the empty "else" arms below make each case exhaustive so no branch
-- raises case_not_found
case
when new.issuer != old.issuer then
if length(trim(new.issuer)) = 0 then
raise exception 'empty issuer: an incomplete oidc auth method must remain inactive';
end if;
when new.issuer = old.issuer then
if length(trim(old.issuer)) = 0 then
raise exception 'empty issuer: an incomplete oidc auth method must remain inactive';
end if;
else
end case;
-- validate client_id
case
when new.client_id != old.client_id then
if length(trim(new.client_id)) = 0 then
raise exception 'empty client_id: an incomplete oidc auth method must remain inactive';
end if;
when new.client_id = old.client_id then
if length(trim(old.client_id)) = 0 then
raise exception 'empty client_id: an incomplete oidc auth method must remain inactive';
end if;
else
end case;
-- validate client_secret
case
when new.client_secret != old.client_secret then
if length(new.client_secret) = 0 then
raise exception 'empty client_secret: an incomplete oidc auth method must remain inactive';
end if;
when new.client_secret = old.client_secret then
if length(old.client_secret) = 0 then
raise exception 'empty client_secret: an incomplete oidc auth method must remain inactive';
end if;
else
end case;
end if;
return new;
end;
$$ language plpgsql;
comment on function active_auth_oidc_method_must_be_complete() is
'active_auth_oidc_method_must_be_complete() will raise an error if the oidc auth method is not complete';
create trigger
update_active_auth_oidc_method_must_be_complete
before
update on auth_oidc_method
for each row execute procedure active_auth_oidc_method_must_be_complete();
-- new_auth_oidc_method_must_be_inactive() defines a function to be used in
-- a "before insert" trigger for auth_oidc_method entries. Its intent:
-- only allow "inactive" auth methods to be inserted. Why? there's no way
-- you can insert an entry that's anything but incomplete, since we have a
-- chicken/egg problem: you need the auth method id to create the required
-- signing algs value objects.
create or replace function
new_auth_oidc_method_must_be_inactive()
returns trigger
as $$
begin
if new.state != 'inactive' then
raise exception 'an incomplete oidc method must be inactive';
end if;
-- Fixed: a row-level before trigger must return the row; without this
-- every valid insert failed with "control reached end of trigger
-- procedure without RETURN".
return new;
end;
$$ language plpgsql;
comment on function new_auth_oidc_method_must_be_inactive() is
'new_auth_oidc_method_must_be_inactive ensures that new incomplete oidc auth methods must remain inactive';
-- Fixed: the trigger previously executed active_auth_oidc_method_must_be_complete(),
-- an update-trigger function that reads old.state (unassigned in an insert
-- trigger), instead of the function defined above.
create trigger
new_auth_oidc_method_must_be_inactive
before
insert on auth_oidc_method
for each row execute procedure new_auth_oidc_method_must_be_inactive();
-- auth_oidc_account column triggers
create trigger
update_time_column
before
update on auth_oidc_account
for each row execute procedure update_time_column();
create trigger
immutable_columns
before
update on auth_oidc_account
for each row execute procedure immutable_columns('public_id', 'auth_method_id', 'scope_id', 'create_time', 'issuer', 'subject');
create trigger
default_create_time_column
before
insert on auth_oidc_account
for each row execute procedure default_create_time();
create trigger
update_version_column
after update on auth_oidc_account
for each row execute procedure update_version_column();
-- insert_auth_oidc_account_subtype is intended as a before insert
-- trigger on auth_oidc_account. Its purpose is to insert a base
-- auth_account for new oidc accounts. It's a bit different than the
-- standard trigger for this, because it will have conflicting PKs
-- and we just want to "do nothing" on those conflicts, deferring the
-- raising on an error to insert into the auth_oidc_account table.
-- this is all necessary because of we're using predictable public ids
-- for oidc accounts.
create or replace function
insert_auth_oidc_account_subtype()
returns trigger
as $$
begin
-- derive scope_id from the parent auth method rather than trusting the
-- value supplied by the caller
select auth_method.scope_id
into new.scope_id
from auth_method
where auth_method.public_id = new.auth_method_id;
insert into auth_account
(public_id, auth_method_id, scope_id)
values
(new.public_id, new.auth_method_id, new.scope_id)
on conflict do nothing;
return new;
end;
$$ language plpgsql;
create trigger
insert_auth_oidc_account_subtype
before insert on auth_oidc_account
for each row execute procedure insert_auth_oidc_account_subtype();
-- triggers for auth_oidc_method children tables: auth_oidc_aud_claim,
-- auth_oidc_certificate, auth_oidc_signing_alg
-- on_delete_active_auth_oidc_method_must_be_complete() defines a function
-- to be used in an "after delete" trigger for auth_oidc_signing_alg
-- Its intent: prevent deletes that would result in an "active" oidc
-- auth method which is incomplete.
create or replace function
on_delete_active_auth_oidc_method_must_be_complete()
returns trigger
as $$
declare am_state text;
declare alg_cnt int;
begin
-- look up the auth method's state and its remaining signing alg count.
-- note: this is an after delete trigger, so only old is populated; new is
-- null for deletes and referencing it would make this check a no-op.
select
am.state,
count(alg.oidc_method_id) as alg_cnt
from
auth_oidc_method am
left outer join auth_oidc_signing_alg alg on am.public_id = alg.oidc_method_id
where
old.oidc_method_id = am.public_id
group by am.public_id
into am_state, alg_cnt;
if not found then
return old; -- auth method was deleted, so we're done
end if;
-- 'inactive' must be a quoted literal: unquoted it would be resolved as an
-- undeclared identifier and raise at runtime. a plain if is used instead of
-- a searched case, because a case with no matching when and no else raises
-- CASE_NOT_FOUND, which would block legitimate deletes when algs remain.
if am_state != 'inactive' then
if alg_cnt = 0 then
raise exception 'delete would have resulted in an incomplete active oidc auth method with no signing algorithms';
end if;
end if;
return old;
end;
$$ language plpgsql;
comment on function on_delete_active_auth_oidc_method_must_be_complete() is
'on_delete_active_auth_oidc_method_must_be_complete() will raise an error if the oidc auth method is not complete after a delete on algs';
-- fill create_time on insert for each of the oidc value-object tables.
create trigger
default_create_time_column
before
insert on auth_oidc_aud_claim
for each row execute procedure default_create_time();
create trigger
default_create_time_column
before
insert on auth_oidc_certificate
for each row execute procedure default_create_time();
create trigger
default_create_time_column
before
insert on auth_oidc_signing_alg
for each row execute procedure default_create_time();
-- enforce that an active oidc auth method always keeps at least one signing alg.
create trigger
on_delete_active_auth_oidc_method_must_be_complete
after
delete on auth_oidc_signing_alg
for each row execute procedure on_delete_active_auth_oidc_method_must_be_complete();
-- register oplog tickets so writes to these aggregates can be recorded in the oplog.
insert into oplog_ticket (name, version)
values
('auth_oidc_method', 1), -- auth method is the root aggregate itself and all of its value objects.
('auth_oidc_account', 1);
`),
2005: []byte(`
-- auth_token_status_enm entries define the possible auth token
-- states.
create table auth_token_status_enm (
name text primary key
constraint name_only_predefined_auth_token_states_allowed
check (
name in ('auth token pending','token issued', 'authentication failed', 'system error')
)
);
-- populate the values of auth_token_status_enm
insert into auth_token_status_enm(name)
values
('auth token pending'),
('token issued'),
('authentication failed'),
('system error');
-- add the state column with a default to the auth_token table.
-- existing rows pick up the default, so every pre-migration token is
-- treated as already issued.
alter table auth_token
add column status text
not null
default 'token issued' -- safest default
references auth_token_status_enm(name)
on update cascade
on delete restrict;
-- auth_token_account joins each token to its account so callers can resolve
-- scope, user and auth method in a single read; recreated here to expose the
-- new status column.
create or replace view auth_token_account as
select at.public_id,
at.token,
at.auth_account_id,
at.create_time,
at.update_time,
at.approximate_last_access_time,
at.expiration_time,
aa.scope_id,
aa.iam_user_id,
aa.auth_method_id,
at.status
from auth_token as at
inner join auth_account as aa
on at.auth_account_id = aa.public_id;
`),
2006: []byte(`
-- add the primary_auth_method_id which determines which auth_method is
-- designated as for "account info" in the user's scope. It also determines
-- which auth method is allowed to auto-vivify users.
alter table iam_scope
add column primary_auth_method_id wt_public_id -- allowed to be null and is mutable of course.
constraint auth_method_fkey
references auth_method(public_id)
on update cascade
on delete set null;
-- establish a compound fk, but there's no cascading of deletes or updates, since
-- we only want to cascade changes to the primary_auth_method_id portion of
-- the compound fk and that is handled in a separate fk declaration. this also
-- guarantees the designated primary auth method belongs to the same scope.
alter table iam_scope
add constraint auth_method
foreign key (public_id, primary_auth_method_id)
references auth_method(scope_id, public_id);
-- iam_acct_info provides account info for users by determining which
-- auth_method is designated as for "account info" in the user's scope via the
-- scope's primary_auth_method_id. Every sub-type of auth_account must be
-- added to this view's union.
create view iam_acct_info as
select
aa.iam_user_id,
oa.subject as login_name,
oa.full_name as full_name,
oa.email as email
from
iam_scope s,
auth_account aa,
auth_oidc_account oa
where
aa.public_id = oa.public_id and
aa.auth_method_id = s.primary_auth_method_id
union
select
aa.iam_user_id,
pa.login_name,
'' as full_name,
'' as email
from
iam_scope s,
auth_account aa,
auth_password_account pa
where
aa.public_id = pa.public_id and
aa.auth_method_id = s.primary_auth_method_id;
-- iam_user_acct_info provides a simple way to retrieve entries that include
-- both the iam_user fields with an outer join to the user's account info.
create view iam_user_acct_info as
select
u.public_id,
u.scope_id,
u.name,
u.description,
u.create_time,
u.update_time,
u.version,
i.login_name,
i.full_name,
i.email
from
iam_user u
left outer join iam_acct_info i on u.public_id = i.iam_user_id;
`),
2007: []byte(`
-- the intent of this update statement: set the primary auth method for scopes
-- that only have a single auth_password_method, since currently there are only
-- auth_password_methods in boundary. Before this release all
-- auth_password_methods were "basically" primary auth methods and would create
-- an iam_user on first login.
with single_authmethod (scope_id, public_id) as (
-- scopes with exactly one password auth method.
select
am.scope_id,
am.public_id
from
auth_password_method am,
(select
scope_id,
count(public_id) as cnt
from
auth_password_method
group by scope_id) as singles
where
am.scope_id = singles.scope_id and
singles.cnt = 1
)
update
iam_scope
set
primary_auth_method_id = p.public_id
from
single_authmethod p
where p.scope_id = iam_scope.public_id;
-- the intent of the insert with select statement: log the scopes that have more
-- than 1 auth method and therefore cannot have their primary auth method
-- automatically set for them. the operator must pick a primary manually.
with many_authmethod (scope_id, authmethod_cnt) as (
-- scopes with two or more password auth methods.
select
am.scope_id,
many.cnt
from
auth_password_method am,
(select
scope_id,
count(public_id) as cnt
from
auth_password_method
group by scope_id) as many
where
am.scope_id = many.scope_id and
many.cnt > 1
)
insert into log_migration(entry)
select
distinct concat(
'unable to set primary_auth_method for ',
public_id,
' there were ',
m.authmethod_cnt,
' password auth methods for that scope.'
) as entry
from
iam_scope s,
many_authmethod m
where
s.primary_auth_method_id is null and
s.public_id = m.scope_id;
`),
2010: []byte(`
-- By adding the name column to the base auth method type, the database can
-- ensure that auth method names are unique across all sub types.
alter table auth_method
add column name wt_name;
alter table auth_method
add constraint auth_method_scope_id_name_uq
unique (scope_id, name);
-- the intent of this statement is to update the base type's name with the
-- existing password auth method names.
update auth_method
set name = pw.name
from
auth_password_method pw
where
auth_method.public_id = pw.public_id and
pw.name is not null;
-- insert_auth_method_subtype() is a replacement of the function definition in
-- migration 07_auth.up.sql This new definition also inserts the sub type's name
-- into the base type. The name column must be on the base type, so the database
-- can ensure that auth method names are unique across all sub types.
create or replace function
insert_auth_method_subtype()
returns trigger
as $$
begin
insert into auth_method
(public_id, scope_id, name)
values
(new.public_id, new.scope_id, new.name);
return new;
end;
$$ language plpgsql;
comment on function insert_auth_method_subtype() is
'insert_auth_method_subtype() inserts sub type name into the base type auth method table';
-- update_auth_method_subtype() is a new function intended to be used in "before
-- update" triggers for all auth method sub types. Its purpose is to ensure
-- that the name column is synchronized between the sub and base auth method
-- types. The name column must be on the base type, so the database can ensure
-- that auth method names are unique across all sub types.
create or replace function
update_auth_method_subtype()
returns trigger
as $$
begin
-- NOTE(review): "new.name != name" is null-hostile — clearing a sub type's
-- name to null will not propagate to the base row; confirm that is intended
-- (a null-safe "is distinct from" would also sync that case).
update auth_method set name = new.name where public_id = new.public_id and new.name != name;
return new;
end;
$$ language plpgsql;
comment on function update_auth_method_subtype() is
'update_auth_method_subtype() will update base auth method type name column with new values from sub type';
create trigger
update_auth_method_subtype
before update on auth_oidc_method
for each row execute procedure update_auth_method_subtype();
create trigger
update_auth_method_subtype
before update on auth_password_method
for each row execute procedure update_auth_method_subtype();
-- delete_auth_method_subtype() is an after trigger function for subtypes of
-- auth_method: deleting a sub type row removes the base row too.
create or replace function
delete_auth_method_subtype()
returns trigger
as $$
begin
delete from auth_method
where public_id = old.public_id;
return null; -- results are ignored since this is an after trigger.
end;
$$ language plpgsql;
comment on function delete_auth_method_subtype is
'delete_auth_method_subtype() is an after trigger function for subytypes of auth_method';
create trigger
delete_auth_method_subtype
after delete on auth_oidc_method
for each row execute procedure delete_auth_method_subtype();
create trigger
delete_auth_method_subtype
after delete on auth_password_method
for each row execute procedure delete_auth_method_subtype();
`),
2015: []byte(`
-- oidc_auth_method_with_value_obj is useful for reading an oidc auth method
-- with its associated value objects (algs, auds, certs) as columns
-- with | delimited values. The use of the postgres string_agg(...) to
-- aggregate the value objects into a column works because we are only pulling
-- in one column from the associated tables and that value is part of the
-- primary key and unique. This view will make things like recursive listing of
-- oidc auth methods fairly straightforward to implement for the oidc repo.
-- The view also includes an is_primary_auth_method bool
create view oidc_auth_method_with_value_obj as
select
-- true when some scope designates this auth method as its primary.
case when s.primary_auth_method_id is not null then
true
else false end
as is_primary_auth_method,
am.public_id,
am.scope_id,
am.name,
am.description,
am.create_time,
am.update_time,
am.version,
am.state,
am.api_url,
am.disable_discovered_config_validation,
am.issuer,
am.client_id,
am.client_secret,
am.client_secret_hmac,
am.key_id,
am.max_age,
-- the string_agg(..) column will be null if there are no associated value objects
string_agg(distinct alg.signing_alg_name, '|') as algs,
string_agg(distinct aud.aud_claim, '|') as auds,
string_agg(distinct cert.certificate, '|') as certs
from
auth_oidc_method am
left outer join iam_scope s on am.public_id = s.primary_auth_method_id
left outer join auth_oidc_signing_alg alg on am.public_id = alg.oidc_method_id
left outer join auth_oidc_aud_claim aud on am.public_id = aud.oidc_method_id
left outer join auth_oidc_certificate cert on am.public_id = cert.oidc_method_id
group by am.public_id, is_primary_auth_method; -- there can be only one public_id + is_primary_auth_method, so group by isn't a problem.
comment on view oidc_auth_method_with_value_obj is
'oidc auth method with its associated value objects (algs, auds, certs) as columns with | delimited values';
`),
2020: []byte(`
-- auth_password_method_with_is_primary is useful for reading a password auth
-- method with a bool to determine if it's the scope's primary auth method.
create view auth_password_method_with_is_primary as
select
-- true when some scope designates this auth method as its primary.
case when s.primary_auth_method_id is not null then
true
else false end
as is_primary_auth_method,
am.public_id,
am.scope_id,
am.password_conf_id,
am.name,
am.description,
am.create_time,
am.update_time,
am.version,
am.min_login_name_length,
am.min_password_length
from
auth_password_method am
left outer join iam_scope s on am.public_id = s.primary_auth_method_id;
comment on view auth_password_method_with_is_primary is
'password auth method with an is_primary_auth_method bool';
`),
3001: []byte(`
-- this constraint is intended to ensure that a user cannot have more than one
-- account per auth_method.
--
-- If this constraint causes the migrations to fail the operator can run the
-- following query to get a list of user ids which have more than one account
-- within the same auth method. At that point, the operator will need to pick
-- which account to keep.
----------------------------------------------------------------------------
-- with too_many_accounts(iam_user_id, acct_count) as (
-- select
-- iam_user_id,
-- count(*) as acct_count
-- from
-- auth_account
-- group by auth_method_id, iam_user_id
-- )
-- select
-- *
-- from
-- too_many_accounts
-- where
-- acct_count > 1;
-- NOTE(review): the constraint name says public_id but the columns are
-- (auth_method_id, iam_user_id), which matches the stated intent; confirm the
-- name mismatch is deliberate before relying on it in error handling.
alter table auth_account
add constraint auth_account_auth_method_id_public_id_uq
unique(auth_method_id, iam_user_id);
`),
3002: []byte(`
drop view iam_user_acct_info;
drop view iam_acct_info;
-- iam_acct_info provides account info for users by determining which
-- auth_method is designated as for "account info" in the user's scope via the
-- scope's primary_auth_method_id. Every sub-type of auth_account must be
-- added to this view's union.
-- NOTE(review): the password branch below lists primary_account_id before
-- login_name while the oidc branch lists login_name first; union matches
-- columns by position, so these two columns are swapped for password
-- accounts. This shipped as-is and is corrected by migration 4001, so it is
-- left untouched here.
create view iam_acct_info as
select
aa.iam_user_id,
oa.subject as login_name,
oa.public_id as primary_account_id,
oa.full_name as full_name,
oa.email as email
from
iam_scope s,
auth_account aa,
auth_oidc_account oa
where
aa.public_id = oa.public_id and
aa.auth_method_id = s.primary_auth_method_id
union
select
aa.iam_user_id,
pa.public_id as primary_account_id,
pa.login_name,
'' as full_name,
'' as email
from
iam_scope s,
auth_account aa,
auth_password_account pa
where
aa.public_id = pa.public_id and
aa.auth_method_id = s.primary_auth_method_id;
-- iam_user_acct_info provides a simple way to retrieve entries that include
-- both the iam_user fields with an outer join to the user's account info.
create view iam_user_acct_info as
select
u.public_id,
u.scope_id,
u.name,
u.description,
u.create_time,
u.update_time,
u.version,
i.primary_account_id,
i.login_name,
i.full_name,
i.email
from
iam_user u
left outer join iam_acct_info i on u.public_id = i.iam_user_id;
`),
4001: []byte(`
-- fix ordering of fields in iam_acct_info for auth_password_account select
-- portion of union. requires recreating both views because of deps.
-- (union matches columns by position, so migration 3002 had login_name and
-- primary_account_id swapped for password accounts.)
drop view iam_user_acct_info;
drop view iam_acct_info;
create view iam_acct_info as
select
aa.iam_user_id,
oa.subject as login_name,
oa.public_id as primary_account_id,
oa.full_name as full_name,
oa.email as email
from
iam_scope s,
auth_account aa,
auth_oidc_account oa
where
aa.public_id = oa.public_id and
aa.auth_method_id = s.primary_auth_method_id
union
select
aa.iam_user_id,
pa.login_name,
pa.public_id as primary_account_id,
'' as full_name,
'' as email
from
iam_scope s,
auth_account aa,
auth_password_account pa
where
aa.public_id = pa.public_id and
aa.auth_method_id = s.primary_auth_method_id;
create view iam_user_acct_info as
select
u.public_id,
u.scope_id,
u.name,
u.description,
u.create_time,
u.update_time,
u.version,
i.primary_account_id,
i.login_name,
i.full_name,
i.email
from
iam_user u
left outer join iam_acct_info i on u.public_id = i.iam_user_id;
`),
5001: []byte(`
-- auth_oidc_scope entries are the optional scopes for a specific oidc auth
-- method. There can be 0 or more for each parent oidc auth method. If an auth
-- method has any scopes, they will be added to provider requests along with the
-- default of "openid".
create table auth_oidc_scope (
create_time wt_timestamp,
oidc_method_id wt_public_id
constraint auth_oidc_method_fkey
references auth_oidc_method(public_id)
on delete cascade
on update cascade,
scope text not null
constraint scope_must_not_be_empty
check(length(trim(scope)) > 0)
constraint scope_must_be_less_than_1024_chars
check(length(trim(scope)) < 1024)
constraint scope_must_not_be_openid -- the default scope is not allowed, since it's redundant
check(lower(trim(scope)) != 'openid'),
primary key(oidc_method_id, scope)
);
comment on table auth_oidc_scope is
'auth_oidc_scope entries are the optional scopes for a specific oidc auth method. There can be 0 or more for each parent oidc auth method. If an auth method has any scopes, they will be added to provider requests along with the openid default.';
create trigger
default_create_time_column
before
insert on auth_oidc_scope
for each row execute procedure default_create_time();
-- we will drop the oidc_auth_method_with_value_obj view, so we can recreate it
-- and add the oidc claim's scopes to the returned set.
drop view oidc_auth_method_with_value_obj;
-- oidc_auth_method_with_value_obj is useful for reading an oidc auth method
-- with its associated value objects (algs, auds, certs) as columns
-- with | delimited values. The use of the postgres string_agg(...) to
-- aggregate the value objects into a column works because we are only pulling
-- in one column from the associated tables and that value is part of the
-- primary key and unique. This view will make things like recursive listing of
-- oidc auth methods fairly straightforward to implement for the oidc repo.
-- The view also includes an is_primary_auth_method bool
create view oidc_auth_method_with_value_obj as
select
case when s.primary_auth_method_id is not null then
true
else false end
as is_primary_auth_method,
am.public_id,
am.scope_id,
am.name,
am.description,
am.create_time,
am.update_time,
am.version,
am.state,
am.api_url,
am.disable_discovered_config_validation,
am.issuer,
am.client_id,
am.client_secret,
am.client_secret_hmac,
am.key_id,
am.max_age,
-- the string_agg(..) column will be null if there are no associated value objects
string_agg(distinct alg.signing_alg_name, '|') as algs,
string_agg(distinct aud.aud_claim, '|') as auds,
string_agg(distinct cert.certificate, '|') as certs,
string_agg(distinct cs.scope, '|') as claims_scopes
from
auth_oidc_method am
left outer join iam_scope s on am.public_id = s.primary_auth_method_id
left outer join auth_oidc_signing_alg alg on am.public_id = alg.oidc_method_id
left outer join auth_oidc_aud_claim aud on am.public_id = aud.oidc_method_id
left outer join auth_oidc_certificate cert on am.public_id = cert.oidc_method_id
left outer join auth_oidc_scope cs on am.public_id = cs.oidc_method_id
group by am.public_id, is_primary_auth_method; -- there can be only one public_id + is_primary_auth_method, so group by isn't a problem.
comment on view oidc_auth_method_with_value_obj is
'oidc auth method with its associated value objects (algs, auds, certs, scopes) as columns with | delimited values';
`),
6001: []byte(`
-- auth_oidc_account_claim_map entries are the optional claim maps from custom
-- claims to the standard claims of sub, name and email. There can be 0 or more
-- for each parent oidc auth method.
create table auth_oidc_account_claim_map (
create_time wt_timestamp,
oidc_method_id wt_public_id
constraint auth_oidc_method_fkey
references auth_oidc_method(public_id)
on delete cascade
on update cascade,
from_claim text not null
constraint from_claim_must_not_be_empty
check(length(trim(from_claim)) > 0)
constraint from_claim_must_be_less_than_1024_chars
check(length(trim(from_claim)) < 1024),
to_claim text not null
constraint to_claim_valid_values
check (to_claim in ('sub', 'name', 'email')), -- intentionally case-sensitive matching
-- the pk allows at most one mapping per target claim per auth method.
primary key(oidc_method_id, to_claim)
);
comment on table auth_oidc_account_claim_map is
'auth_oidc_account_claim_map entries are the optional claim maps from custom claims to the standard claims of sub, name and email. There can be 0 or more for each parent oidc auth method.';
create trigger
default_create_time_column
before
insert on auth_oidc_account_claim_map
for each row execute procedure default_create_time();
create trigger
immutable_columns
before
update on auth_oidc_account_claim_map
for each row execute procedure immutable_columns('oidc_method_id', 'from_claim', 'to_claim', 'create_time');
-- we will drop the oidc_auth_method_with_value_obj view, so we can recreate it
-- and add the account claim maps to the returned set.
drop view oidc_auth_method_with_value_obj;
-- oidc_auth_method_with_value_obj is useful for reading an oidc auth method
-- with its associated value objects (algs, auds, certs, claims scopes and
-- account claim maps) as columns with | delimited values. The use of the
-- postgres string_agg(...) to aggregate the value objects into a column works
-- because we are only pulling in one column from the associated tables and that
-- value is part of the primary key and unique. This view will make things like
-- recursive listing of oidc auth methods fairly straightforward to implement
-- for the oidc repo. The view also includes an is_primary_auth_method bool
create view oidc_auth_method_with_value_obj as
select
case when s.primary_auth_method_id is not null then
true
else false end
as is_primary_auth_method,
am.public_id,
am.scope_id,
am.name,
am.description,
am.create_time,
am.update_time,
am.version,
am.state,
am.api_url,
am.disable_discovered_config_validation,
am.issuer,
am.client_id,
am.client_secret,
am.client_secret_hmac,
am.key_id,
am.max_age,
-- the string_agg(..) column will be null if there are no associated value objects
string_agg(distinct alg.signing_alg_name, '|') as algs,
string_agg(distinct aud.aud_claim, '|') as auds,
string_agg(distinct cert.certificate, '|') as certs,
string_agg(distinct cs.scope, '|') as claims_scopes,
string_agg(distinct concat_ws('=', acm.from_claim, acm.to_claim), '|') as account_claim_maps
from
auth_oidc_method am
left outer join iam_scope s on am.public_id = s.primary_auth_method_id
left outer join auth_oidc_signing_alg alg on am.public_id = alg.oidc_method_id
left outer join auth_oidc_aud_claim aud on am.public_id = aud.oidc_method_id
left outer join auth_oidc_certificate cert on am.public_id = cert.oidc_method_id
left outer join auth_oidc_scope cs on am.public_id = cs.oidc_method_id
left outer join auth_oidc_account_claim_map acm on am.public_id = acm.oidc_method_id
group by am.public_id, is_primary_auth_method; -- there can be only one public_id + is_primary_auth_method, so group by isn't a problem.
comment on view oidc_auth_method_with_value_obj is
'oidc auth method with its associated value objects (algs, auds, certs, scopes) as columns with | delimited values';
`),
7001: []byte(`
-- wt_add_seconds: pure helper to offset a timestamptz by whole seconds.
-- declared stable (not immutable) and strict via "returns null on null input".
create function wt_add_seconds(sec integer, ts timestamp with time zone)
returns timestamp with time zone
as $$
select ts + sec * '1 second'::interval;
$$ language sql
stable
returns null on null input;
comment on function wt_add_seconds is
'wt_add_seconds returns ts + sec.';
-- convenience wrapper anchored at the transaction's current_timestamp.
create function wt_add_seconds_to_now(sec integer)
returns timestamp with time zone
as $$
select wt_add_seconds(sec, current_timestamp);
$$ language sql
stable
returns null on null input;
comment on function wt_add_seconds_to_now is
'wt_add_seconds_to_now returns current_timestamp + sec.';
`),
7002: []byte(`
create domain wt_plugin_id as text
not null
check(
length(trim(value)) > 10 or value = 'pi_system'
);
comment on domain wt_plugin_id is
'"pi_system", or random ID generated with github.com/hashicorp/go-secure-stdlib/base62';
create table plugin (
public_id wt_plugin_id primary key
);
comment on table plugin is
'plugin is a table where each row represents a unique plugin registered with Boundary.';
-- seed the reserved built-in plugin row.
insert into plugin (public_id)
values
('pi_system');
create trigger immutable_columns before update on plugin
for each row execute procedure immutable_columns('public_id');
-- guard the built-in 'pi_system' row against deletion; returning old allows
-- all other deletes to proceed.
create or replace function
disallow_system_plugin_deletion()
returns trigger
as $$
begin
if old.public_id = 'pi_system' then
raise exception 'deletion of system plugin not allowed';
end if;
return old;
end;
$$ language plpgsql;
create trigger plugin_disallow_system_deletion before delete on plugin
for each row execute procedure disallow_system_plugin_deletion();
`),
7003: []byte(`
create table job (
plugin_id wt_plugin_id not null
constraint plugin_fk
references plugin(public_id)
on delete cascade
on update cascade,
name wt_name not null,
description wt_description not null,
next_scheduled_run timestamp with time zone not null,
primary key (plugin_id, name)
);
comment on table job is
'job is a table where each row represents a unique job that can only have one running instance at any specific time.';
create trigger immutable_columns before update on job
for each row execute procedure immutable_columns('plugin_id', 'name');
create table job_run_status_enm (
name text not null primary key
constraint only_predefined_job_status_allowed
check(name in ('running', 'completed', 'failed', 'interrupted'))
);
comment on table job_run_status_enm is
'job_run_status_enm is an enumeration table where each row contains a valid job run state.';
insert into job_run_status_enm (name)
values
('running'),
('completed'),
('failed'),
('interrupted');
create table job_run (
-- NOTE(review): the run id reuses the warehouse wh_dim_id domain/function;
-- confirm this is intentional rather than wt_private_id.
private_id wh_dim_id primary key
default wh_dim_id(),
job_plugin_id wt_plugin_id not null,
job_name wt_name not null,
server_id wt_private_id
constraint server_fkey
references server(private_id)
on delete set null
on update cascade,
create_time wt_timestamp,
update_time wt_timestamp,
end_time timestamp with time zone,
completed_count int not null
default 0
constraint completed_count_can_not_be_negative
check(completed_count >= 0),
total_count int not null
default 0
constraint total_count_can_not_be_negative
check(total_count >= 0),
status text not null
default 'running'
constraint job_run_status_enm_fkey
references job_run_status_enm (name)
on delete restrict
on update cascade,
constraint job_run_completed_count_less_than_equal_to_total_count
check(completed_count <= total_count),
constraint job_fkey
foreign key (job_plugin_id, job_name)
references job (plugin_id, name)
on delete cascade
on update cascade
);
comment on table job_run is
'job_run is a table where each row represents an instance of a job run that is either actively running or has already completed.';
-- a partial unique index enforcing at most one concurrently running instance
-- per job.
create unique index job_run_status_constraint
on job_run (job_plugin_id, job_name)
where status = 'running';
create trigger update_time_column before update on job_run
for each row execute procedure update_time_column();
create trigger default_create_time_column before insert on job_run
for each row execute procedure default_create_time();
create trigger immutable_columns before update on job_run
for each row execute procedure immutable_columns('private_id', 'job_plugin_id', 'job_name', 'create_time');
-- job_jobs_to_run lists jobs that are due (next_scheduled_run in the past)
-- and have no currently running instance.
create view job_jobs_to_run as
with
running_jobs (job_plugin_id, job_name) as (
select job_plugin_id, job_name
from job_run
where status = 'running'
),
final (job_plugin_id, job_name, next_scheduled_run) as (
select plugin_id, name, next_scheduled_run
from job j
where next_scheduled_run <= current_timestamp
and not exists (
select
from running_jobs
where job_plugin_id = j.plugin_id
and job_name = j.name
)
)
select job_plugin_id, job_name, next_scheduled_run from final;
`),
// Migration 8001: record which worker/server a session connection is
// routed through. Adds session_connection.server_id with an FK to
// server(private_id) (on delete set null, on update cascade), then
// backfills the new column from the owning session row.
// NOTE(review): the embedded SQL comment flags that a trigger closing
// connections when server_id becomes null is still outstanding
// (ICU-1495) — confirm a later migration adds it.
// Generated file: do not hand-edit the SQL; its bytes are the shipped
// migration content.
8001: []byte(`
alter table session_connection
add column server_id text;
-- Note: here, and in the session table, we should add a trigger ensuring that
-- if server_id goes to null, we mark connections as closed. See
-- https://hashicorp.atlassian.net/browse/ICU-1495
alter table session_connection
add constraint server_fkey
foreign key (server_id)
references server (private_id)
on delete set null
on update cascade;
-- We now populate the connection information from existing session information
update session_connection sc
set
server_id = s.server_id
from
session s
where
sc.session_id = s.public_id;
`),
// Migration 9001: abstract base table for managed groups.
// Creates auth_managed_group (public_id PK, auth_method_id FK cascading
// from auth_method, plus a (auth_method_id, public_id) unique pair that
// concrete subtypes reference), makes public_id/auth_method_id immutable
// via trigger, and defines the two PL/pgSQL helper functions that keep
// the base table in sync with concrete subtype tables:
//   - insert_managed_group_subtype(): before-insert on a subtype, inserts
//     the matching base row so the subtype's FK is valid;
//   - delete_managed_group_subtype(): after-delete on a subtype, removes
//     the base row (returns null — result ignored for after triggers).
// Generated file: do not hand-edit the SQL; its bytes are the shipped
// migration content.
9001: []byte(`
-- The base abstract table
create table auth_managed_group (
public_id wt_public_id
primary key,
auth_method_id wt_public_id
not null,
-- Ensure that if the auth method is deleted (which will also happen if the
-- scope is deleted) this is deleted too
constraint auth_method_fkey
foreign key (auth_method_id) -- fk1
references auth_method(public_id)
on delete cascade
on update cascade,
constraint auth_managed_group_auth_method_id_public_id_uq
unique(auth_method_id, public_id)
);
comment on table auth_managed_group is
'auth_managed_group is the abstract base table for managed groups.';
-- Define the immutable fields of auth_managed_group
create trigger
immutable_columns
before
update on auth_managed_group
for each row execute procedure immutable_columns('public_id', 'auth_method_id');
-- Function to insert into the base table when values are inserted into a
-- concrete type table. This happens before inserts so the foreign keys in the
-- concrete type will be valid.
create or replace function
insert_managed_group_subtype()
returns trigger
as $$
begin
insert into auth_managed_group
(public_id, auth_method_id)
values
(new.public_id, new.auth_method_id);
return new;
end;
$$ language plpgsql;
-- delete_managed_group_subtype() is an after delete trigger
-- function for subtypes of managed_group
create or replace function delete_managed_group_subtype()
returns trigger
as $$
begin
delete from auth_managed_group
where public_id = old.public_id;
return null; -- result is ignored since this is an after trigger
end;
$$ language plpgsql;
`),
// Migration 9002: concrete OIDC managed-group subtype.
// Creates auth_oidc_managed_group with two FKs enforcing the subtype
// pattern: fk1 ties it to an oidc auth method specifically, fk2 ties
// (auth_method_id, public_id) back to the abstract auth_managed_group
// row. Names are unique per auth method. Wires the standard triggers
// (immutable columns, default create time, update time, version bump)
// plus the 9001 subtype-sync triggers (insert before / delete after),
// and registers the oplog ticket on this concrete table — the base
// table is never updated directly, so it needs no ticket.
// Generated file: do not hand-edit the SQL; its bytes are the shipped
// migration content.
9002: []byte(`
create table auth_oidc_managed_group (
public_id wt_public_id
primary key,
auth_method_id wt_public_id
not null,
name wt_name,
description wt_description,
create_time wt_timestamp,
update_time wt_timestamp,
version wt_version,
filter wt_bexprfilter
not null,
-- Ensure that this managed group relates to an oidc auth method, as opposed
-- to other types
constraint auth_oidc_method_fkey
foreign key (auth_method_id) -- fk1
references auth_oidc_method (public_id)
on delete cascade
on update cascade,
-- Ensure it relates to an abstract managed group
constraint auth_managed_group_fkey
foreign key (auth_method_id, public_id) -- fk2
references auth_managed_group (auth_method_id, public_id)
on delete cascade
on update cascade,
constraint auth_oidc_managed_group_auth_method_id_name_uq
unique(auth_method_id, name)
);
comment on table auth_oidc_managed_group is
'auth_oidc_managed_group entries are subtypes of auth_managed_group and represent an oidc managed group.';
-- Define the immutable fields of auth_oidc_managed_group
create trigger
immutable_columns
before
update on auth_oidc_managed_group
for each row execute procedure immutable_columns('public_id', 'auth_method_id', 'create_time');
-- Populate create time on insert
create trigger
default_create_time_column
before
insert on auth_oidc_managed_group
for each row execute procedure default_create_time();
-- Generate update time on update
create trigger
update_time_column
before
update on auth_oidc_managed_group
for each row execute procedure update_time_column();
-- Update version when something changes
create trigger
update_version_column
after
update on auth_oidc_managed_group
for each row execute procedure update_version_column();
-- Add into the base table when inserting into the concrete table
create trigger
insert_managed_group_subtype
before insert on auth_oidc_managed_group
for each row execute procedure insert_managed_group_subtype();
-- Ensure that deletions in the oidc subtype result in deletions to the base
-- table.
create trigger
delete_managed_group_subtype
after
delete on auth_oidc_managed_group
for each row execute procedure delete_managed_group_subtype();
-- The tickets for oplog are the subtypes not the base types because no updates
-- are done to any values in the base types.
insert into oplog_ticket
(name, version)
values
('auth_oidc_managed_group', 1);
`),
// Migration 9003: membership join table between OIDC managed groups and
// OIDC accounts. Composite PK (managed_group_id, member_id); both FKs
// cascade so rows vanish with either side. Rows are append-only: a
// before-update trigger raises unconditionally, and create_time is
// stamped on insert. Also creates the auth_managed_group_member_account
// view, currently selecting only the oidc rows — the inline comment says
// it is intended to be replaced with a union as other subtypes appear.
// Generated file: do not hand-edit the SQL; its bytes are the shipped
// migration content.
9003: []byte(`
-- Mappings of account to oidc managed groups. This is a non-abstract table with
-- a view (below) so that it is a natural aggregate for the oplog (also below).
create table auth_oidc_managed_group_member_account (
create_time wt_timestamp,
managed_group_id wt_public_id
references auth_oidc_managed_group(public_id)
on delete cascade
on update cascade,
member_id wt_public_id
references auth_oidc_account(public_id)
on delete cascade
on update cascade,
primary key (managed_group_id, member_id)
);
comment on table auth_oidc_managed_group_member_account is
'auth_oidc_managed_group_member_account is the join table for managed oidc groups and accounts.';
-- auth_immutable_managed_oidc_group_member_account() ensures that group members are immutable.
create or replace function
auth_immutable_managed_oidc_group_member_account()
returns trigger
as $$
begin
raise exception 'managed oidc group members are immutable';
end;
$$ language plpgsql;
create trigger
default_create_time_column
before
insert on auth_oidc_managed_group_member_account
for each row execute procedure default_create_time();
create trigger
auth_immutable_managed_oidc_group_member_account
before
update on auth_oidc_managed_group_member_account
for each row execute procedure auth_immutable_managed_oidc_group_member_account();
-- Initially create the view with just oidc; eventually we can replace this view
-- to union with other subtype tables.
create view auth_managed_group_member_account as
select
oidc.create_time,
oidc.managed_group_id,
oidc.member_id
from
auth_oidc_managed_group_member_account oidc;
`),
// Migration 9004: role grants for managed groups.
// Creates iam_managed_group_role (composite PK role_id + principal_id,
// cascading FKs to iam_role and the abstract auth_managed_group), made
// append-only via iam_immutable_role_principal(). Then redefines the
// iam_principal_role view to union three principal kinds — 'user',
// 'group', and the new 'managed group' — each branch resolving the
// principal's scope (for managed groups via a scalar subquery on
// auth_method, since the group's scope is its auth method's scope).
// NOTE(review): the three branches carry distinct 'type' literals, so
// plain `union`'s dedup pass is redundant and `union all` would be
// cheaper — but this migration has shipped, so its bytes must not
// change; consider a follow-up migration if it matters.
// Generated file: do not hand-edit the SQL; its bytes are the shipped
// migration content.
9004: []byte(`
-- iam_managed_group_role contains roles that have been assigned to managed
-- groups. Managed groups can be from any scope. The rows in this table must be
-- immutable after insert, which will be ensured with a before update trigger
-- using iam_immutable_role_principal().
create table iam_managed_group_role (
create_time wt_timestamp,
role_id wt_role_id
references iam_role(public_id)
on delete cascade
on update cascade,
principal_id wt_public_id
references auth_managed_group(public_id)
on delete cascade
on update cascade,
primary key (role_id, principal_id)
);
create trigger immutable_role_principal
before update on iam_managed_group_role
for each row execute procedure iam_immutable_role_principal();
create trigger default_create_time_column
before insert on iam_managed_group_role
for each row execute procedure default_create_time();
-- iam_principal_role provides a consolidated view all principal roles assigned
-- (user and group roles).
create or replace view iam_principal_role as
select
ur.create_time,
ur.principal_id,
ur.role_id,
u.scope_id as principal_scope_id,
r.scope_id as role_scope_id,
get_scoped_principal_id(r.scope_id, u.scope_id, ur.principal_id) as scoped_principal_id,
'user' as type
from
iam_user_role ur,
iam_role r,
iam_user u
where
ur.role_id = r.public_id and
u.public_id = ur.principal_id
union
select
gr.create_time,
gr.principal_id,
gr.role_id,
g.scope_id as principal_scope_id,
r.scope_id as role_scope_id,
get_scoped_principal_id(r.scope_id, g.scope_id, gr.principal_id) as scoped_principal_id,
'group' as type
from
iam_group_role gr,
iam_role r,
iam_group g
where
gr.role_id = r.public_id and
g.public_id = gr.principal_id
union
select
mgr.create_time,
mgr.principal_id,
mgr.role_id,
(select scope_id from auth_method am where am.public_id = amg.auth_method_id) as principal_scope_id,
r.scope_id as role_scope_id,
get_scoped_principal_id(r.scope_id, (select scope_id from auth_method am where am.public_id = amg.auth_method_id), mgr.principal_id) as scoped_principal_id,
'managed group' as type
from
iam_managed_group_role mgr,
iam_role r,
auth_managed_group amg
where
mgr.role_id = r.public_id and
amg.public_id = mgr.principal_id;
`),
},
}
}