feat(db): new cols for retain and delete session recordings, update structs, and tests

pull/4239/head
Danielle Miu 2 years ago committed by Louis Ruch
parent 6ca1bc3c3f
commit df02651bb5

@@ -0,0 +1,184 @@
-- Copyright (c) HashiCorp, Inc.
-- SPDX-License-Identifier: BUSL-1.1
begin;
create function wt_add_days(ds integer, ts timestamptz) returns timestamptz
as $$
select ts + ds * '1 day'::interval;
$$ language sql
stable
returns null on null input;
comment on function wt_add_days is
'wt_add_days returns ts + days.';
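-- For example (values mirror the pgTAP test for this migration):
--   select wt_add_days(10, '2024-01-01 12:34:56.789+00'::timestamptz);
--   --> 2024-01-11 12:34:56.789+00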
alter table recording_session add column retain_for_days int not null default -1;
alter table recording_session add column retain_until rec_timestamp;
alter table recording_session add column delete_after_days int not null default 0
constraint delete_after_days_non_negative
check(delete_after_days >= 0);
alter table recording_session
add constraint delete_after_days_greater_or_equal_than_retain_for_days
check(delete_after_days >= retain_for_days or delete_after_days = 0);
alter table recording_session add column delete_after rec_timestamp
constraint delete_after_null_or_after_retain_until
check(delete_after >= retain_until);
alter table recording_session add column delete_time rec_timestamp
constraint delete_time_null_or_after_retain_until
check(delete_time >= retain_until);
alter table recording_session add column target_org_id wt_public_id null
references iam_scope_org(scope_id)
on delete set null
on update cascade;
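-- column semantics, per set_delete_and_retain_times below:
--   retain_for_days < 0 retains the recording forever (retain_until = infinity);
--   retain_for_days = 0 sets no retention (retain_until stays null);
--   delete_after_days = 0 schedules no deletion (delete_after stays null),
--   which is why the check constraint above exempts it from the
--   delete_after_days >= retain_for_days comparison.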
-- set_delete_and_retain_times is a trigger function that runs before update on
-- recording_session to set the calculated retain_until and delete_after
-- timestamps from the session recording's end time.
create or replace function set_delete_and_retain_times() returns trigger
as $$
begin
if new.end_time is not null then
if new.retain_for_days = 0 then
new.retain_until = null;
elsif new.retain_for_days < 0 then
new.retain_until = 'infinity'::timestamptz;
else
new.retain_until = wt_add_days(new.retain_for_days, new.end_time);
end if;
if new.delete_after_days = 0 then
new.delete_after = null;
-- new.delete_after_days < 0 is not possible due to delete_after_days_non_negative
else
new.delete_after = wt_add_days(new.delete_after_days, new.end_time);
end if;
end if;
return new;
end;
$$ language plpgsql;
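-- The two validation trigger functions referenced below are not shown in this
-- hunk. A minimal sketch consistent with the pgTAP tests further down (both
-- are expected to raise errcode P0001) might look like this; the actual
-- definitions in the migration may differ:
create or replace function retain_for_days_and_delete_after_days_not_zero() returns trigger
as $$
begin
  -- a recording must either be retained or scheduled for deletion
  if new.retain_for_days = 0 and new.delete_after_days = 0 then
    raise exception 'retain_for_days and delete_after_days cannot both be 0';
  end if;
  return new;
end;
$$ language plpgsql;
create or replace function delete_after_days_zero_if_infinite_retain_for_days() returns trigger
as $$
begin
  -- an infinite retention period (retain_for_days < 0) forbids scheduled deletion
  if new.retain_for_days < 0 and new.delete_after_days <> 0 then
    raise exception 'delete_after_days must be 0 when retain_for_days is infinite';
  end if;
  return new;
end;
$$ language plpgsql;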
create trigger retain_for_days_and_delete_after_days_not_zero before insert or update on recording_session
for each row execute procedure retain_for_days_and_delete_after_days_not_zero();
create trigger delete_after_days_zero_if_infinite_retain_for_days before insert or update on recording_session
for each row execute procedure delete_after_days_zero_if_infinite_retain_for_days();
create trigger set_delete_and_retain_times before update on recording_session
for each row execute procedure set_delete_and_retain_times();
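-- note: set_delete_and_retain_times fires only on update because end_time is
-- populated after the recording row is created; until then retain_until and
-- delete_after stay null, as the pgTAP test for this migration asserts.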
-- backfill target_org_id for existing session recordings using the parent org
-- recorded in each recording's project history row
update recording_session rs
set target_org_id = ish.parent_id
from iam_scope_hst ish
join iam_scope_org iso on ish.parent_id = iso.scope_id
where ish.history_id = rs.target_project_hst_id;
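-- recordings whose org has since been deleted keep a null target_org_id;
-- the migration test covers this case.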
-- replaces 71/12_session_recording_views.up.sql
drop view session_recording_aggregate; -- the view must be dropped and recreated: the new retain_until and delete_after columns are added mid-list, which create or replace view does not allow
create view session_recording_aggregate as
select
rs.public_id,
rs.storage_bucket_id,
rs.session_id,
rs.create_time,
rs.update_time,
rs.start_time,
rs.end_time,
rs.state,
rs.error_details,
rs.endpoint,
rs.retain_until,
rs.delete_after,
sb.scope_id as storage_bucket_scope_id,
-- fields that cover the user fields at creation time
uh.public_id as user_history_public_id,
uh.name as user_history_name,
uh.description as user_history_description,
uh.scope_id as user_history_scope_id,
-- fields that cover the user's scope information at creation time
ush.public_id as user_scope_history_public_id,
ush.name as user_scope_history_name,
ush.description as user_scope_history_description,
ush.type as user_scope_history_type,
ush.parent_id as user_scope_history_parent_id,
ush.primary_auth_method_id as user_scope_history_primary_auth_method_id,
-- fields that cover the target fields at creation time
th.public_id as target_history_public_id,
th.name as target_history_name,
th.description as target_history_description,
th.default_port as target_history_default_port,
th.session_max_seconds as target_history_session_max_seconds,
th.session_connection_limit as target_history_session_connection_limit,
th.worker_filter as target_history_worker_filter,
th.ingress_worker_filter as target_history_ingress_worker_filter,
th.egress_worker_filter as target_history_egress_worker_filter,
th.default_client_port as target_history_default_client_port,
th.enable_session_recording as target_history_enable_session_recording,
th.storage_bucket_id as target_history_storage_bucket_id,
-- fields that cover the target's scope information at creation time
tsh.public_id as target_scope_history_public_id,
tsh.name as target_scope_history_name,
tsh.description as target_scope_history_description,
tsh.type as target_scope_history_type,
tsh.parent_id as target_scope_history_parent_id,
tsh.primary_auth_method_id as target_scope_history_primary_auth_method_id,
-- static
-- host catalogs
shch.public_id as static_catalog_history_public_id,
shch.project_id as static_catalog_history_project_id,
shch.name as static_catalog_history_name,
shch.description as static_catalog_history_description,
-- hosts
shh.public_id as static_host_history_public_id,
shh.name as static_host_history_name,
shh.description as static_host_history_description,
-- catalog_id is unnecessary as it's inferred from the host catalog row
shh.address as static_host_history_address,
-- plugin
-- host catalogs
hpch.public_id as plugin_catalog_history_public_id,
hpch.project_id as plugin_catalog_history_project_id,
hpch.name as plugin_catalog_history_name,
hpch.description as plugin_catalog_history_description,
hpch.attributes as plugin_catalog_history_attributes,
hpch.plugin_id as plugin_catalog_history_plugin_id,
-- hosts
hph.public_id as plugin_host_history_public_id,
hph.name as plugin_host_history_name,
hph.description as plugin_host_history_description,
-- catalog_id is unnecessary as it's inferred from the host catalog row
hph.external_id as plugin_host_history_external_id,
hph.external_name as plugin_host_history_external_name
from recording_session rs
join storage_plugin_storage_bucket sb on
rs.storage_bucket_id = sb.public_id
join iam_user_hst uh on
rs.user_hst_id = uh.history_id
join iam_scope_hst as ush on
rs.user_scope_hst_id = ush.history_id
join target_ssh_hst th on
rs.target_hst_id = th.history_id
join iam_scope_hst as tsh on
rs.target_project_hst_id = tsh.history_id
left join static_host_catalog_hst as shch on
rs.host_catalog_hst_id = shch.history_id
left join host_plugin_catalog_hst as hpch on
rs.host_catalog_hst_id = hpch.history_id
left join static_host_hst as shh on
rs.host_hst_id = shh.history_id
left join host_plugin_host_hst as hph on
rs.host_hst_id = hph.history_id
where (rs.delete_after is null or rs.delete_after > now())
and (rs.delete_time is null or rs.delete_time > now());
comment on view session_recording_aggregate is
'session_recording_aggregate contains the session recording resource with its storage bucket scope info and historical user info.';
commit;

@@ -22,6 +22,7 @@ comment on view recording_channel_ssh_aggregate is
'recording_channel_ssh_aggregate contains the ssh channel recording info along with other info needed for displaying it on the api.';
-- replaced in xx/02_recording_session.up.sql -- TODO: replace xx with the final migration number for storage policies
create view session_recording_aggregate as
select
rs.public_id,

@@ -0,0 +1,234 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package oss_test
import (
"context"
"database/sql"
"testing"
"github.com/hashicorp/boundary/internal/db"
"github.com/hashicorp/boundary/internal/db/common"
"github.com/hashicorp/boundary/internal/db/schema/migrations/oss"
"github.com/hashicorp/boundary/testing/dbtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
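// TestMigrationHook100002 verifies that the migration backfills target_org_id
// on existing session recordings and that recordings whose org has been
// deleted keep a null target_org_id.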
func TestMigrationHook100002(t *testing.T) { // TODO: this will need to be updated when the migration has been numbered
t.Parallel()
ctx := context.Background()
dialect := dbtest.Postgres
c, u, _, err := dbtest.StartUsingTemplate(dialect, dbtest.WithTemplate(dbtest.Template1))
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, c())
})
d, err := common.SqlOpen(dialect, u)
require.NoError(t, err)
// get a connection
dbType, err := db.StringToDbType(dialect)
require.NoError(t, err)
conn, err := db.Open(ctx, dbType, u)
require.NoError(t, err)
rw := db.New(conn)
oss.ApplyMigration(t, ctx, d, 100001) // TODO: this will need to be updated when the migration has been numbered
// Insert test data into the database.
populateMigration100002(t, rw)
// now apply migration 100/002
oss.ApplyMigration(t, ctx, d, 100002) // TODO: this will need to be updated when the migration has been numbered
// session recording sr_________1 should now have target org id o_test__100002
rows, err := rw.Query(ctx, "select target_org_id from recording_session where public_id = @public_id;", []any{sql.Named("public_id", "sr_________1")})
require.NoError(t, err)
count := 0
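// scan into a *string so that a null target_org_id scans as nil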
var targetOrgId *string
for rows.Next() {
require.NoError(t, rows.Scan(&targetOrgId))
count++
}
rows.Close()
assert.Equal(t, 1, count)
assert.Equal(t, "o_test__100002", *targetOrgId)
// session recording sr_________2 should have null target org id
rows, err = rw.Query(ctx, "select target_org_id from recording_session where public_id = @public_id;", []any{sql.Named("public_id", "sr_________2")})
require.NoError(t, err)
defer rows.Close()
count = 0
for rows.Next() {
require.NoError(t, rows.Scan(&targetOrgId))
count++
}
assert.Equal(t, 1, count)
assert.Nil(t, targetOrgId)
}
func populateMigration100002(t *testing.T, rw *db.Db) { // TODO: this will need to be updated when the migration has been numbered
t.Helper()
require := require.New(t)
ctx := context.Background()
var (
err error
query string
)
query = `
insert into iam_scope
(parent_id, type, public_id, name)
values
('global', 'org', 'o_test__100002', 'Testing Session Recording Target Org Ids'),
('global', 'org', 'o_test2_100002', 'Second test org for Session Recording Target Org Ids');`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
query = `
insert into iam_scope
(parent_id, type, public_id, name)
values
('o_test__100002', 'project', 'p_test__100002', 'testing 100002 Project A'),
('o_test2_100002', 'project', 'p_test2_100002', 'testing 100002 Project B');`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
query = `
insert into plugin
(scope_id, public_id, name)
values
('global', 'pl__plg___sb', 'Storage Bucket Plugin');`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
query = `
insert into plugin_storage_supported
(public_id)
values
('pl__plg___sb');`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
query = `
insert into storage_plugin_storage_bucket
(plugin_id, scope_id, public_id, bucket_name, worker_filter, secrets_hmac)
values
('pl__plg___sb', 'global', 'sb____global', 'Global Storage Bucket', 'test worker filter', '\xdeadbeef');`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
// yes, I copied many of these from the colors persona, I am not ashamed
query = `
insert into iam_user
(scope_id, public_id, name)
values
('o_test__100002', 'u______clare', 'Clare'),
('o_test2_100002', 'u______clara', 'Clara');`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
query = `
insert into kms_root_key
(scope_id, private_id)
values
('o_test__100002', 'krk___colors'),
('o_test2_100002', 'krk__colors2');`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
query = `
insert into kms_root_key_version
(root_key_id, private_id, key)
values
('krk___colors', 'krkv__colors', '_______color1'::bytea),
('krk__colors2', 'krkv_colors2', '_______color2'::bytea);`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
query = `
insert into kms_data_key
(root_key_id, private_id, purpose)
values
('krk___colors', 'kdk___colors', 'database'),
('krk__colors2', 'kdk__colors2', 'database');`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
query = `
insert into kms_data_key_version
(root_key_version_id, data_key_id, private_id, key)
values
('krkv__colors', 'kdk___colors', 'kdkv__colors', '_______color3'::bytea),
('krkv_colors2', 'kdk__colors2', 'kdkv_colors2', '_______color4'::bytea);`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
query = `
insert into auth_password_conf
(password_method_id, private_id)
values
('apm___colors', 'apmc__colors'),
('apm__colors2', 'apmc_colors2');
-- Add password auth method to organizations
insert into auth_password_method
(scope_id, public_id, password_conf_id, name)
values
('o_test__100002', 'apm___colors', 'apmc__colors', 'Colors Auth Password'),
('o_test2_100002', 'apm__colors2', 'apmc_colors2', 'Colors 2 Auth Password');
insert into auth_password_account
(auth_method_id, public_id, login_name)
values
('apm___colors', 'apa____clare', 'clare'),
('apm__colors2', 'apa____clara', 'clara');
update auth_account set iam_user_id = 'u______clare' where public_id = 'apa____clare';
update auth_account set iam_user_id = 'u______clara' where public_id = 'apa____clara';
insert into auth_token
(key_id, auth_account_id, public_id, token)
values
('kdkv__colors', 'apa____clare', 'tok____clare', 'tok____clare'::bytea),
('kdkv_colors2', 'apa____clara', 'tok____clara', 'tok____clara'::bytea);`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
// Add Test Session Recording
query = `
insert into target_ssh
(project_id, public_id, name, enable_session_recording, storage_bucket_id)
values
('p_test__100002', 'tssh_test__100002', 'Test SSH Target', true, 'sb____global'),
('p_test2_100002', 'tssh_test2_100002', 'Test2 SSH Target', true, 'sb____global');`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
query = `
insert into session
(project_id, target_id, public_id, user_id, auth_token_id, certificate, endpoint)
values
('p_test__100002', 'tssh_test__100002', 's_test__100002', 'u______clare', 'tok____clare', 'abc'::bytea, 'ep1'),
('p_test2_100002', 'tssh_test2_100002', 's_test2_100002', 'u______clara', 'tok____clara', 'abc'::bytea, 'ep1');`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
query = `
insert into recording_session
(public_id, storage_bucket_id, session_id)
values
('sr_________1', 'sb____global', 's_test__100002'),
('sr_________2', 'sb____global', 's_test2_100002');`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
// delete the second org so we can confirm that recordings whose org no longer exists keep a null target_org_id after the migration
query = `
delete from iam_scope where public_id = 'o_test2_100002';`
_, err = rw.Exec(ctx, query, nil)
require.NoError(err)
}

@@ -11,8 +11,8 @@ import (
"github.com/hashicorp/boundary/internal/db"
"github.com/hashicorp/boundary/internal/db/common"
"github.com/hashicorp/boundary/internal/db/schema"
"github.com/hashicorp/boundary/internal/db/schema/migration"
"github.com/hashicorp/boundary/internal/db/schema/migrations/oss"
"github.com/hashicorp/boundary/internal/db/schema/migrations/oss/internal/hook46001"
"github.com/hashicorp/boundary/testing/dbtest"
"github.com/stretchr/testify/require"
@@ -41,7 +41,7 @@ func TestMigrationHook46001(t *testing.T) {
// We start at 43001 because one of the security vulnerabilities was a
// bad trigger on table target_host_set that was supposed to validate scope_id integrity between
// the target & host set. In migration 44002, this trigger is fixed.
applyMigration(t, ctx, d, 43001)
oss.ApplyMigration(t, ctx, d, 43001)
// Insert test data into the database.
populateScopes(t, rw)
@@ -63,7 +63,7 @@ func TestMigrationHook46001(t *testing.T) {
// Apply migrations 4400x, 4500x
migrationIds := []int{44001, 44002, 44003, 44004, 45001, 45002, 45003}
for _, migrationId := range migrationIds {
applyMigration(t, ctx, d, migrationId)
oss.ApplyMigration(t, ctx, d, migrationId)
}
tx, err := d.BeginTx(ctx, nil)
@@ -386,27 +386,3 @@ func validateRepairFunc(t *testing.T, rw *db.Db, repairReport migration.Repairs)
},
}, associations)
}
func applyMigration(t *testing.T, ctx context.Context, d *sql.DB, migrationId int) {
dialect := dbtest.Postgres
m, err := schema.NewManager(ctx, schema.Dialect(dialect), d, schema.WithEditions(
schema.TestCreatePartialEditions(schema.Dialect(dialect), schema.PartialEditions{"oss": migrationId}),
))
require.NoError(t, err)
_, err = m.ApplyMigrations(ctx)
require.NoError(t, err)
state, err := m.CurrentState(ctx)
require.NoError(t, err)
want := &schema.State{
Initialized: true,
Editions: []schema.EditionState{
{
Name: "oss",
BinarySchemaVersion: migrationId,
DatabaseSchemaVersion: migrationId,
DatabaseSchemaState: schema.Equal,
},
},
}
require.Equal(t, want, state)
}

@@ -0,0 +1,38 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package oss
import (
"context"
"database/sql"
"testing"
"github.com/hashicorp/boundary/internal/db/schema"
"github.com/hashicorp/boundary/testing/dbtest"
"github.com/stretchr/testify/require"
)
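// ApplyMigration applies the oss edition migrations up to and including
// migrationId and asserts that the resulting schema state matches.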
func ApplyMigration(t *testing.T, ctx context.Context, d *sql.DB, migrationId int) {
dialect := dbtest.Postgres
m, err := schema.NewManager(ctx, schema.Dialect(dialect), d, schema.WithEditions(
schema.TestCreatePartialEditions(schema.Dialect(dialect), schema.PartialEditions{"oss": migrationId}),
))
require.NoError(t, err)
_, err = m.ApplyMigrations(ctx)
require.NoError(t, err)
state, err := m.CurrentState(ctx)
require.NoError(t, err)
want := &schema.State{
Initialized: true,
Editions: []schema.EditionState{
{
Name: "oss",
BinarySchemaVersion: migrationId,
DatabaseSchemaVersion: migrationId,
DatabaseSchemaState: schema.Equal,
},
},
}
require.Equal(t, want, state)
}

@@ -35,7 +35,8 @@ TESTS ?= tests/setup/*.sql \
tests/recording/*.sql \
tests/auth/*/*.sql \
tests/purge/*.sql \
tests/pagination/*.sql
tests/pagination/*.sql \
tests/policy/*.sql
POSTGRES_DOCKER_IMAGE_BASE ?= postgres

@@ -549,9 +549,9 @@ begin;
('s2_____clare', 's2c1___clare');
insert into recording_session
(session_id, storage_bucket_id, public_id)
(session_id, storage_bucket_id, public_id, target_org_id)
values
('s1_____clare', 'sb____global', 'sr1____clare'),
('s1______cora', 'sb____colors', 'sr1_____cora');
('s1_____clare', 'sb____global', 'sr1____clare', 'o_____colors'),
('s1______cora', 'sb____colors', 'sr1_____cora', 'o_____colors');
commit;

@@ -0,0 +1,99 @@
-- Copyright (c) HashiCorp, Inc.
-- SPDX-License-Identifier: BUSL-1.1
begin;
select plan(18);
select has_table('recording_session');
select has_column('recording_session', 'retain_for_days');
select has_column('recording_session', 'retain_until');
select has_column('recording_session', 'delete_after_days');
select has_column('recording_session', 'delete_after');
select has_column('recording_session', 'delete_time');
-- test value-setting triggers
-- no end_time: retain_until and delete_after should be null
insert into recording_session
(public_id, storage_bucket_id, session_id, target_org_id, retain_for_days, delete_after_days)
values
('sr_________1', 'sb____global', 's2_____carly', 'o_____colors', 10, 10);
select results_eq('select retain_until, delete_after from recording_session where public_id = ''sr_________1''',
$$VALUES (null::rec_timestamp, null::rec_timestamp) $$, 'null end time should yield null retain_until and delete_after values');
-- set end_time; positive retain_for_days and delete_after_days should yield computed retain_until and delete_after
update recording_session set end_time = '2024-01-01 12:34:56.789+00' where public_id = 'sr_________1';
select results_eq('select retain_until, delete_after from recording_session where public_id = ''sr_________1''',
$$VALUES ('2024-01-11 12:34:56.789+00'::rec_timestamp, '2024-01-11 12:34:56.789+00'::rec_timestamp) $$, 'populated end time should correctly calc retain_until and delete_after values');
-- set delete_time equal to retain_until
update recording_session set delete_time = retain_until where public_id = 'sr_________1';
select results_eq('select delete_time from recording_session where public_id = ''sr_________1''',
ARRAY['2024-01-11 12:34:56.789+00'::rec_timestamp], 'delete time must be able to be set to retain_until');
update recording_session set delete_time = null where public_id = 'sr_________1'; -- reset delete time
-- set delete_time after retain_until
update recording_session set delete_time = '2024-01-23 12:34:56.789+00' where public_id = 'sr_________1';
select results_eq('select delete_time from recording_session where public_id = ''sr_________1''',
ARRAY['2024-01-23 12:34:56.789+00'::rec_timestamp], 'delete time must be able to be set after retain_until');
update recording_session set delete_time = null where public_id = 'sr_________1'; -- reset delete time
-- set delete_time with null retain_until
update recording_session set retain_for_days = 0, delete_time = '2024-01-11 12:34:56.789+00' where public_id = 'sr_________1';
select results_eq('select delete_time from recording_session where public_id = ''sr_________1''',
ARRAY['2024-01-11 12:34:56.789+00'::rec_timestamp], 'delete time must be able to be set with null retain_until'); -- TODO: is this logic correct? should we add a check to disallow this?
update recording_session set delete_time = null where public_id = 'sr_________1'; -- reset delete time
-- update retain, negative value should generate inf retention
update recording_session set retain_for_days = -1, delete_after_days = 0 where public_id = 'sr_________1';
select results_eq('select retain_until, delete_after from recording_session where public_id = ''sr_________1''',
$$VALUES ('infinity'::rec_timestamp, null::rec_timestamp) $$, 'negative retain for days should calc inf value');
-- test constraints
prepare update_rs_delete_after_days_and_retain_for_days_zero as
update recording_session set
retain_for_days = 0,
delete_after_days = 0
where public_id = 'sr_________1';
select throws_ok('update_rs_delete_after_days_and_retain_for_days_zero', 'P0001', null, 'delete_after_days and retain_for_days both cannot be zero');
prepare update_rs_delete_after_days_negative as
update recording_session set
retain_for_days = 10,
delete_after_days = -1
where public_id = 'sr_________1';
select throws_ok('update_rs_delete_after_days_negative', '23514', null, 'delete_after_days cannot be negative');
prepare update_rs_delete_after_days_while_inf_retain as
update recording_session set
retain_for_days = -1,
delete_after_days = 10
where public_id = 'sr_________1';
select throws_ok('update_rs_delete_after_days_while_inf_retain', 'P0001', null, 'delete_after_days must be 0 while retain_for_days is inf');
prepare update_rs_delete_after_less_than_retain as
update recording_session set
retain_for_days = 6,
delete_after_days = 5
where public_id = 'sr_________1';
select throws_ok('update_rs_delete_after_less_than_retain', '23514', null, 'delete_after must be greater than or equal to retain_for');
prepare update_rs_delete_time_before_retain as
update recording_session set
retain_for_days = 10,
delete_time = '2024-01-11 12:34:56.788+00'
where public_id = 'sr_________1';
select throws_ok('update_rs_delete_time_before_retain', '23514', null, 'delete_time must be after or equal to retain_until');
prepare update_rs_delete_time_with_inf_retain as
update recording_session set
retain_for_days = -1,
delete_time = '2077-01-11 12:34:56.789+00'
where public_id = 'sr_________1';
select throws_ok('update_rs_delete_time_with_inf_retain', '23514', null, 'delete_time cannot be set with inf retain');
select * from finish();
rollback;

@@ -17,9 +17,9 @@ begin;
select has_view('recording_channel_ssh_aggregate', 'view for aggregating channel recording info does not exist');
insert into recording_session
(public_id, storage_bucket_id, session_id, state)
(public_id, storage_bucket_id, session_id, state, target_org_id)
values
('sr_123456789', 'sb____global', 's2_____clare', 'started');
('sr_123456789', 'sb____global', 's2_____clare', 'started', 'o_____colors');
insert into session_connection
(public_id, session_id)
values

@@ -14,9 +14,9 @@ begin;
select wtt_load('widgets', 'iam', 'kms', 'auth', 'hosts', 'targets', 'sessions');
insert into recording_session
(public_id, storage_bucket_id, session_id)
(public_id, storage_bucket_id, session_id, target_org_id)
values
('sr_123456789', 'sb____global', 's2_____clare');
('sr_123456789', 'sb____global', 's2_____clare', 'o_____colors');
insert into session_connection
(public_id, session_id)
values

@@ -64,24 +64,24 @@ begin;
-- Try to insert row with null session id
prepare insert_invalid_recording_session as
insert into recording_session
(public_id, storage_bucket_id, session_id, state)
(public_id, storage_bucket_id, session_id, state, target_org_id)
values
('sr_________1', 'sb____global', null, 'started');
('sr_________1', 'sb____global', null, 'started', 'o_____colors');
select throws_ok('insert_invalid_recording_session', null, null, 'insert invalid recording_session succeeded');
-- Try to insert row with non-started state
prepare insert_with_invalid_state as
insert into recording_session
(public_id, storage_bucket_id, session_id, state)
(public_id, storage_bucket_id, session_id, state, target_org_id)
values
('sr_________1', 'sb_________g', 's1_____clare', 'available');
('sr_________1', 'sb_________g', 's1_____clare', 'available', 'o_____colors');
select throws_ok('insert_with_invalid_state', null, null, 'insert invalid recording_session state succeeded');
prepare insert_recording_session as
insert into recording_session
(public_id, storage_bucket_id, session_id, state)
(public_id, storage_bucket_id, session_id, state, target_org_id)
values
('sr_________1', 'sb____global', 's2_____clare', 'started');
('sr_________1', 'sb____global', 's2_____clare', 'started', 'o_____colors');
select lives_ok('insert_recording_session');
select results_eq(
@@ -90,9 +90,9 @@ begin;
prepare insert_recording_session_target_address as
insert into recording_session
(public_id, storage_bucket_id, session_id)
(public_id, storage_bucket_id, session_id, target_org_id)
values
('sr_________2', 'sb____global', 's2______cora');
('sr_________2', 'sb____global', 's2______cora', 'o_____colors');
select lives_ok('insert_recording_session_target_address');
select results_eq(
@@ -101,9 +101,9 @@ begin;
prepare insert_recording_session_plugin_host as
insert into recording_session
(public_id, storage_bucket_id, session_id)
(public_id, storage_bucket_id, session_id, target_org_id)
values
('sr_________3', 'sb____global', 's2_____carly');
('sr_________3', 'sb____global', 's2_____carly', 'o_____colors');
select lives_ok('insert_recording_session_plugin_host');
-- Try to set end_time before start_time
