mirror of https://github.com/hashicorp/boundary
Add httpfs DB migrations and generate the input (#35)
parent
bd5ddea6e4
commit
7f2aa5ca8f
@ -0,0 +1,121 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang-migrate/migrate/v4/source"
|
||||
"github.com/golang-migrate/migrate/v4/source/httpfs"
|
||||
)
|
||||
|
||||
// migrationDriver satisfies the remaining need of the Driver interface, since
// the package uses PartialDriver under the hood. It acts as the
// http.FileSystem handed to httpfs, serving the generated in-memory
// migration files.
type migrationDriver struct {
	// dialect selects which generated migrations map Open consults;
	// only "postgres" is currently populated (see NewMigrationSource).
	dialect string
}
|
||||
|
||||
// Open returns the given "file"
|
||||
func (m *migrationDriver) Open(name string) (http.File, error) {
|
||||
var ff *fakeFile
|
||||
switch m.dialect {
|
||||
case "postgres":
|
||||
ff = postgresMigrations[name]
|
||||
}
|
||||
if ff == nil {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
ff.name = strings.TrimPrefix(name, "migrations/")
|
||||
ff.reader = bytes.NewReader(ff.bytes)
|
||||
ff.dialect = m.dialect
|
||||
return ff, nil
|
||||
}
|
||||
|
||||
// NewMigrationSource creates a source.Driver using httpfs with the given dialect
|
||||
func NewMigrationSource(dialect string) (source.Driver, error) {
|
||||
switch dialect {
|
||||
case "postgres":
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown migrations dialect %s", dialect)
|
||||
}
|
||||
return httpfs.New(&migrationDriver{dialect}, "migrations")
|
||||
}
|
||||
|
||||
// fakeFile is an in-memory stand-in used to satisfy the http.File
// interface for the generated migration contents.
type fakeFile struct {
	name    string        // base name, with any "migrations/" prefix stripped
	bytes   []byte        // raw SQL contents of the migration
	reader  *bytes.Reader // read/seek cursor over bytes; populated by Open
	dialect string        // selects the migrations map consulted by Readdir
}

// Read delegates to the in-memory reader.
func (f *fakeFile) Read(p []byte) (n int, err error) {
	n, err = f.reader.Read(p)
	return n, err
}

// Seek delegates to the in-memory reader.
func (f *fakeFile) Seek(offset int64, whence int) (int64, error) {
	pos, err := f.reader.Seek(offset, whence)
	return pos, err
}

// Close is a no-op; no underlying resource is held.
func (f *fakeFile) Close() error { return nil }
|
||||
|
||||
// Readdir returns os.FileInfo values, in sorted order, and eliding the
|
||||
// migrations "dir"
|
||||
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||
// Get the right map
|
||||
var migrationsMap map[string]*fakeFile
|
||||
switch f.dialect {
|
||||
case "postgres":
|
||||
migrationsMap = postgresMigrations
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown database dialect %s", f.dialect)
|
||||
}
|
||||
|
||||
// Sort the keys. May not be necessary but feels nice.
|
||||
keys := make([]string, 0, len(migrationsMap))
|
||||
for k := range migrationsMap {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
// Create the slice of fileinfo objects to return
|
||||
ret := make([]os.FileInfo, 0, len(migrationsMap))
|
||||
for _, v := range keys {
|
||||
// We need "migrations" in the map for the initial Open call but we
|
||||
// should not return it as part of the "directory"'s "files".
|
||||
if v == "migrations" {
|
||||
continue
|
||||
}
|
||||
stat, err := migrationsMap[v].Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ret = append(ret, stat)
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Stat returns a new fakeFileInfo object with the necessary bits
|
||||
func (f *fakeFile) Stat() (os.FileInfo, error) {
|
||||
return &fakeFileInfo{
|
||||
name: f.name,
|
||||
size: int64(len(f.bytes)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// fakeFileInfo satisfies os.FileInfo but represents our fake "files".
type fakeFileInfo struct {
	name string // base name reported by Name
	size int64  // content length in bytes reported by Size
}

// Name reports the stored file name.
func (fi *fakeFileInfo) Name() string {
	return fi.name
}

// Size reports the stored content length.
func (fi *fakeFileInfo) Size() int64 {
	return fi.size
}

// Mode reports a fully-permissive mode for every fake file.
func (fi *fakeFileInfo) Mode() os.FileMode {
	return os.ModePerm
}

// ModTime reports the current time; fake files carry no real timestamp.
func (fi *fakeFileInfo) ModTime() time.Time {
	return time.Now()
}

// IsDir always reports false; only regular fake files are represented.
func (fi *fakeFileInfo) IsDir() bool {
	return false
}

// Sys returns nil; there is no underlying data source.
func (fi *fakeFileInfo) Sys() interface{} {
	return nil
}
|
||||
@ -0,0 +1,8 @@
|
||||
# Be sure to place this BEFORE `include` directives, if any.
THIS_FILE := $(lastword $(MAKEFILE_LIST))

# Regenerate the embedded Go migration sources (the genmigrations-tagged
# generator under this module), then run goimports over the generated
# files to format them and prune unused imports.
# Requires GEN_BASEPATH to point at the repository root.
migrations:
	go run -tags genmigrations .
	goimports -w ${GEN_BASEPATH}/internal/db/migrations

.PHONY: migrations
||||
@ -0,0 +1,88 @@
|
||||
// +build genmigrations
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// generate looks for migration sql in a directory for the given dialect and
|
||||
// applies the templates below to the contents of the files, building up a
|
||||
// migrations map for the dialect
|
||||
func generate(dialect string) {
|
||||
baseDir := os.Getenv("GEN_BASEPATH") + fmt.Sprint("/internal/db/migrations")
|
||||
dir, err := os.Open(fmt.Sprintf("%s/%s", baseDir, dialect))
|
||||
if err != nil {
|
||||
fmt.Printf("error opening dir with dialect %s: %v\n", dialect, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
names, err := dir.Readdirnames(0)
|
||||
if err != nil {
|
||||
fmt.Printf("error reading dir names with dialect %s: %v\n", dialect, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
outBuf := bytes.NewBuffer(nil)
|
||||
valuesBuf := bytes.NewBuffer(nil)
|
||||
|
||||
sort.Strings(names)
|
||||
|
||||
for _, name := range names {
|
||||
if !strings.HasSuffix(name, ".sql") {
|
||||
continue
|
||||
}
|
||||
contents, err := ioutil.ReadFile(fmt.Sprintf("%s/%s/%s", baseDir, dialect, name))
|
||||
if err != nil {
|
||||
fmt.Printf("error opening file %s with dialect %s: %v", name, dialect, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
migrationsValueTemplate.Execute(valuesBuf, struct {
|
||||
Name string
|
||||
Contents string
|
||||
}{
|
||||
Name: name,
|
||||
Contents: string(contents),
|
||||
})
|
||||
}
|
||||
migrationsTemplate.Execute(outBuf, struct {
|
||||
Type string
|
||||
Values string
|
||||
}{
|
||||
Type: dialect,
|
||||
Values: valuesBuf.String(),
|
||||
})
|
||||
|
||||
outFile := fmt.Sprintf("%s/%s.gen.go", baseDir, dialect)
|
||||
if err := ioutil.WriteFile(outFile, outBuf.Bytes(), 0644); err != nil {
|
||||
fmt.Printf("error writing file %q: %v\n", outFile, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// migrationsTemplate renders the complete generated Go source file: the
// "DO NOT EDIT" header, package clause, imports, and a
// {{ .Type }}Migrations map seeded with the synthetic "migrations" root
// entry (needed by the driver's initial Open), followed by the per-file
// entries accumulated in {{ .Values }}.
// NOTE(review): the emitted "bytes" import looks unused by the generated
// code; presumably the goimports pass in "make migrations" prunes it —
// confirm before changing either side.
var migrationsTemplate = template.Must(template.New("").Parse(
	`// Code generated by "make migrations"; DO NOT EDIT.
package migrations

import (
	"bytes"
)

var {{ .Type }}Migrations = map[string]*fakeFile{
	"migrations": {
		name: "migrations",
	},
{{ .Values }}
}
`))
|
||||
|
||||
// migrationsValueTemplate renders a single map entry: the key is the file
// name prefixed with "migrations/", and the SQL contents are embedded in a
// backquoted []byte literal, padded with a leading and trailing newline by
// the template itself.
var migrationsValueTemplate = template.Must(template.New("").Parse(
	`"migrations/{{ .Name }}": {
	name: "{{ .Name }}",
	bytes: []byte(` + "`\n{{ .Contents }}\n`" + `),
},
`))
|
||||
@ -0,0 +1,7 @@
|
||||
// +build genmigrations
|
||||
|
||||
package main
|
||||
|
||||
// main regenerates the embedded migration map for each supported dialect;
// it is run via "go run -tags genmigrations ." (see the Makefile
// "migrations" target). Only postgres migrations currently exist.
func main() {
	generate("postgres")
}
|
||||
@ -0,0 +1,225 @@
|
||||
// Code generated by "make migrations"; DO NOT EDIT.
|
||||
package migrations
|
||||
|
||||
// postgresMigrations maps the httpfs paths requested by the migration
// driver to their in-memory contents. The bare "migrations" entry exists
// only so the driver's initial Open of the root succeeds; fakeFile.Readdir
// elides it from directory listings.
// NOTE(review): this file is produced by "make migrations" — hand edits,
// including these comments, are overwritten on regeneration.
var postgresMigrations = map[string]*fakeFile{
	"migrations": {
		name: "migrations",
	},
	"migrations/01_oplog.up.sql": {
		name: "01_oplog.up.sql",
		bytes: []byte(`
CREATE TABLE if not exists oplog_entry (
id bigint generated always as identity primary key,
create_time timestamp with time zone default current_timestamp,
update_time timestamp with time zone default current_timestamp,
version text NOT NULL,
aggregate_name text NOT NULL,
"data" bytea NOT NULL
);
CREATE TABLE if not exists oplog_ticket (
id bigint generated always as identity primary key,
create_time timestamp with time zone default current_timestamp,
update_time timestamp with time zone default current_timestamp,
"name" text NOT NULL UNIQUE,
"version" bigint NOT NULL
);
CREATE TABLE if not exists oplog_metadata (
id bigint generated always as identity primary key,
create_time timestamp with time zone default current_timestamp,
entry_id bigint NOT NULL REFERENCES oplog_entry(id) ON DELETE CASCADE ON UPDATE CASCADE,
"key" text NOT NULL,
value text NULL
);
create index if not exists idx_oplog_metatadata_key on oplog_metadata(key);
create index if not exists idx_oplog_metatadata_value on oplog_metadata(value);
INSERT INTO oplog_ticket (name, version)
values
('default', 1),
('iam_scope', 1),
('iam_user', 1),
('iam_auth_method', 1),
('iam_group', 1),
('iam_group_member_user', 1),
('iam_role', 1),
('iam_role_grant', 1),
('iam_role_group', 1),
('iam_role_user', 1),
('db_test_user', 1),
('db_test_car', 1),
('db_test_rental', 1);
`),
	},
	"migrations/02_domain_types.down.sql": {
		name: "02_domain_types.down.sql",
		bytes: []byte(`
begin;

drop domain wt_public_id;

commit;

`),
	},
	"migrations/02_domain_types.up.sql": {
		name: "02_domain_types.up.sql",
		bytes: []byte(`
begin;

create domain wt_public_id as text
check(
length(trim(value)) > 10
);
comment on domain wt_public_id is
'Random ID generated with github.com/hashicorp/vault/sdk/helper/base62';

commit;

`),
	},
	"migrations/03_db.down.sql": {
		name: "03_db.down.sql",
		bytes: []byte(`
drop table if exists db_test_user;
drop table if exists db_test_car;
drop table if exists db_test_rental;
`),
	},
	"migrations/03_db.up.sql": {
		name: "03_db.up.sql",
		bytes: []byte(`
-- create test tables used in the unit tests for the internal/db package
-- these tables (db_test_user, db_test_car, db_test_rental) are not part
-- of the Watchtower domain model... they are simply used for testing the internal/db package
CREATE TABLE if not exists db_test_user (
id bigint generated always as identity primary key,
create_time timestamp with time zone default current_timestamp,
update_time timestamp with time zone default current_timestamp,
public_id text NOT NULL UNIQUE,
name text UNIQUE,
phone_number text,
email text
);
CREATE TABLE if not exists db_test_car (
id bigint generated always as identity primary key,
create_time timestamp with time zone default current_timestamp,
update_time timestamp with time zone default current_timestamp,
public_id text NOT NULL UNIQUE,
name text UNIQUE,
model text,
mpg smallint
);
CREATE TABLE if not exists db_test_rental (
id bigint generated always as identity primary key,
create_time timestamp with time zone default current_timestamp,
update_time timestamp with time zone default current_timestamp,
public_id text NOT NULL UNIQUE,
name text UNIQUE,
user_id bigint not null REFERENCES db_test_user(id),
car_id bigint not null REFERENCES db_test_car(id)
);
`),
	},
	"migrations/04_iam.down.sql": {
		name: "04_iam.down.sql",
		bytes: []byte(`
BEGIN;

drop table if exists iam_scope CASCADE;
drop trigger if exists iam_scope_insert;
drop function if exists iam_sub_scopes_func;

COMMIT;
`),
	},
	"migrations/04_iam.up.sql": {
		name: "04_iam.up.sql",
		bytes: []byte(`
BEGIN;

CREATE TABLE if not exists iam_scope_type_enm (
string text NOT NULL primary key CHECK(string IN ('unknown', 'organization', 'project'))
);
INSERT INTO iam_scope_type_enm (string)
values
('unknown'),
('organization'),
('project');


CREATE TABLE if not exists iam_scope (
public_id wt_public_id primary key,
create_time timestamp with time zone default current_timestamp,
update_time timestamp with time zone default current_timestamp,
name text,
type text NOT NULL REFERENCES iam_scope_type_enm(string) CHECK(
(
type = 'organization'
and parent_id = NULL
)
or (
type = 'project'
and parent_id IS NOT NULL
)
),
description text,
parent_id text REFERENCES iam_scope(public_id) ON DELETE CASCADE ON UPDATE CASCADE
);
create table if not exists iam_scope_organization (
scope_id wt_public_id NOT NULL UNIQUE REFERENCES iam_scope(public_id) ON DELETE CASCADE ON UPDATE CASCADE,
name text UNIQUE,
primary key(scope_id)
);
create table if not exists iam_scope_project (
scope_id wt_public_id NOT NULL REFERENCES iam_scope(public_id) ON DELETE CASCADE ON UPDATE CASCADE,
parent_id wt_public_id NOT NULL REFERENCES iam_scope_organization(scope_id) ON DELETE CASCADE ON UPDATE CASCADE,
name text,
unique(parent_id, name),
primary key(scope_id, parent_id)
);


CREATE
OR REPLACE FUNCTION iam_sub_scopes_func() RETURNS TRIGGER
SET SCHEMA
'public' LANGUAGE plpgsql AS $$ DECLARE parent_type INT;
BEGIN IF new.type = 'organization' THEN
insert into iam_scope_organization (scope_id, name)
values
(new.public_id, new.name);
return NEW;
END IF;
IF new.type = 'project' THEN
insert into iam_scope_project (scope_id, parent_id, name)
values
(new.public_id, new.parent_id, new.name);
return NEW;
END IF;
RAISE EXCEPTION 'unknown scope type';
END;
$$;


CREATE TRIGGER iam_scope_insert
AFTER
insert ON iam_scope FOR EACH ROW EXECUTE PROCEDURE iam_sub_scopes_func();


CREATE
OR REPLACE FUNCTION iam_immutable_scope_type_func() RETURNS TRIGGER
SET SCHEMA
'public' LANGUAGE plpgsql AS $$ DECLARE parent_type INT;
BEGIN IF new.type != old.type THEN
RAISE EXCEPTION 'scope type cannot be updated';
END IF;
return NEW;
END;
$$;

CREATE TRIGGER iam_scope_update
BEFORE
update ON iam_scope FOR EACH ROW EXECUTE PROCEDURE iam_immutable_scope_type_func();

COMMIT;
`),
	},
}
|
||||
Loading…
Reference in new issue