From 804d59722a8931e77fd8b22c549d9643aa32fc89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toma=C5=BE=20Jerman?= Date: Sun, 19 Feb 2023 14:02:30 +0100 Subject: [PATCH] Base envoy codegen --- server/automation/envoy/store_decode.gen.go | 256 ++ server/automation/envoy/store_decode.go | 34 + server/automation/envoy/store_encode.gen.go | 465 +++ server/automation/envoy/store_encode.go | 19 + server/automation/envoy/yaml_decode.gen.go | 804 +++++ server/automation/envoy/yaml_decode.go | 14 + server/automation/envoy/yaml_encode.gen.go | 327 ++ server/automation/session.cue | 4 + server/automation/trigger.cue | 10 + server/automation/types/workflow.go | 5 +- server/automation/workflow.cue | 20 +- .../gocode/envoy/store_decode.go.tpl | 12 +- .../gocode/envoy/store_encode.go.tpl | 6 +- .../templates/gocode/envoy/yaml_decode.go.tpl | 61 +- .../templates/gocode/envoy/yaml_encode.go.tpl | 4 +- server/codegen/schema/model.cue | 7 + server/codegen/schema/resource.cue | 16 +- server/compose/attachment.cue | 4 + server/compose/chart.cue | 22 + server/compose/envoy/store_decode.gen.go | 573 ++++ server/compose/envoy/store_decode.go | 117 + server/compose/envoy/store_encode.gen.go | 1021 ++++++ server/compose/envoy/store_encode.go | 45 + server/compose/envoy/yaml_decode.gen.go | 1596 +++++++++ server/compose/envoy/yaml_decode.go | 66 + server/compose/envoy/yaml_encode.gen.go | 525 +++ server/compose/envoy/yaml_encode.go | 13 + server/compose/module.cue | 24 + server/compose/module_field.cue | 23 +- server/compose/namespace.cue | 18 +- server/compose/page.cue | 20 +- server/compose/record.cue | 5 + server/compose/record_revision.cue | 4 + server/compose/types/page.go | 15 +- server/federation/module_exposed.cue | 3 + server/federation/module_mapping.cue | 4 + server/federation/node.cue | 4 + server/federation/node_sync.cue | 4 + server/federation/shared_module.cue | 4 + server/pkg/envoyx/node.go | 10 + server/pkg/envoyx/utils.gen.go | 36 + server/store/adapters/rdbms/filters.gen.go | 26 
+- server/system/apigw_filter.cue | 17 +- server/system/apigw_route.cue | 19 +- server/system/application.cue | 17 + server/system/attachment.cue | 4 + server/system/auth_client.cue | 16 +- server/system/auth_confirmed_client.cue | 4 + server/system/auth_oa2token.cue | 4 + server/system/auth_session.cue | 4 + server/system/credential.cue | 4 + server/system/dal_connection.cue | 13 +- server/system/dal_sensitivity_level.cue | 14 +- server/system/data_privacy_request.cue | 4 + .../system/data_privacy_request_comment.cue | 4 + server/system/envoy/store_decode.gen.go | 1035 ++++++ server/system/envoy/store_decode.go | 12 + server/system/envoy/store_encode.gen.go | 2131 ++++++++++++ server/system/envoy/store_encode.go | 91 + server/system/envoy/yaml_decode.gen.go | 2946 +++++++++++++++++ server/system/envoy/yaml_decode.go | 10 + server/system/envoy/yaml_encode.gen.go | 1086 ++++++ server/system/queue.cue | 17 + server/system/queue_message.cue | 4 + server/system/reminder.cue | 4 + server/system/report.cue | 9 + server/system/resource_translation.cue | 5 + server/system/rest/dal_connection.go | 10 +- server/system/rest/data_privacy.go | 6 +- server/system/rest/sensitivity_level.go | 2 +- server/system/role.cue | 9 + server/system/role_member.cue | 4 + server/system/settings.cue | 4 + server/system/template.cue | 9 + server/system/types/apigw_filter.go | 3 +- server/system/types/apigw_route.go | 7 +- server/system/types/applications.go | 8 +- server/system/types/auth_client.go | 5 +- server/system/types/dal_connection.go | 6 +- server/system/types/dal_sensitivity_level.go | 6 +- server/system/types/getters_setters.gen.go | 84 + server/system/types/queue.go | 4 +- server/system/types/user.go | 10 +- server/system/user.cue | 10 +- 84 files changed, 13840 insertions(+), 67 deletions(-) create mode 100644 server/automation/envoy/store_decode.gen.go create mode 100644 server/automation/envoy/store_decode.go create mode 100644 server/automation/envoy/store_encode.gen.go create 
mode 100644 server/automation/envoy/store_encode.go create mode 100644 server/automation/envoy/yaml_decode.gen.go create mode 100644 server/automation/envoy/yaml_decode.go create mode 100644 server/automation/envoy/yaml_encode.gen.go create mode 100644 server/compose/envoy/store_decode.gen.go create mode 100644 server/compose/envoy/store_decode.go create mode 100644 server/compose/envoy/store_encode.gen.go create mode 100644 server/compose/envoy/store_encode.go create mode 100644 server/compose/envoy/yaml_decode.gen.go create mode 100644 server/compose/envoy/yaml_decode.go create mode 100644 server/compose/envoy/yaml_encode.gen.go create mode 100644 server/compose/envoy/yaml_encode.go create mode 100644 server/pkg/envoyx/utils.gen.go create mode 100644 server/system/envoy/store_decode.gen.go create mode 100644 server/system/envoy/store_decode.go create mode 100644 server/system/envoy/store_encode.gen.go create mode 100644 server/system/envoy/store_encode.go create mode 100644 server/system/envoy/yaml_decode.gen.go create mode 100644 server/system/envoy/yaml_decode.go create mode 100644 server/system/envoy/yaml_encode.gen.go diff --git a/server/automation/envoy/store_decode.gen.go b/server/automation/envoy/store_decode.gen.go new file mode 100644 index 000000000..ba1c64f39 --- /dev/null +++ b/server/automation/envoy/store_decode.gen.go @@ -0,0 +1,256 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import ( + "context" + "fmt" + + "github.com/cortezaproject/corteza/server/automation/types" + "github.com/cortezaproject/corteza/server/pkg/dal" + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/store" +) + +type ( + // StoreDecoder is responsible for fetching already stored Corteza resources + // which are then managed by envoy and imported via an encoder. 
+ StoreDecoder struct{} +) + +// Decode returns a set of envoy nodes based on the provided params +// +// StoreDecoder expects the DecodeParam of `storer` and `dal` which conform +// to the store.Storer and dal.FullService interfaces. +func (d StoreDecoder) Decode(ctx context.Context, p envoyx.DecodeParams) (out envoyx.NodeSet, err error) { + var ( + s store.Storer + dl dal.FullService + ) + + // @todo we can optionally not require them based on what we're doing + if auxS, ok := p.Params["storer"]; ok { + s = auxS.(store.Storer) + } + if auxDl, ok := p.Params["dal"]; ok { + dl = auxDl.(dal.FullService) + } + + return d.decode(ctx, s, dl, p) +} + +func (d StoreDecoder) decode(ctx context.Context, s store.Storer, dl dal.FullService, p envoyx.DecodeParams) (out envoyx.NodeSet, err error) { + // Transform passed filters into an ordered structure + type ( + filterWrap struct { + rt string + f envoyx.ResourceFilter + } + ) + wrappedFilters := make([]filterWrap, 0, len(p.Filter)) + for rt, f := range p.Filter { + wrappedFilters = append(wrappedFilters, filterWrap{rt: rt, f: f}) + } + + // Get all requested scopes + scopedNodes := make(envoyx.NodeSet, len(p.Filter)) + + // @note skipping scope logic since it's currently only supported within + // Compose resources. + + // Get all requested references + // + // Keep an index for the Node and one for the reference to make our + // lives easier. 
+ refNodes := make([]map[string]*envoyx.Node, len(p.Filter)) + refRefs := make([]map[string]envoyx.Ref, len(p.Filter)) + for i, a := range wrappedFilters { + if len(a.f.Refs) == 0 { + continue + } + + auxr := make(map[string]*envoyx.Node, len(a.f.Refs)) + auxa := make(map[string]envoyx.Ref) + for field, ref := range a.f.Refs { + f := ref.ResourceFilter() + aux, err := d.decode(ctx, s, dl, envoyx.DecodeParams{ + Type: envoyx.DecodeTypeStore, + Filter: f, + }) + if err != nil { + return nil, err + } + if len(aux) == 0 { + return nil, fmt.Errorf("invalid reference %v", ref) + } + if len(aux) > 1 { + return nil, fmt.Errorf("ambiguous reference: too many resources returned %v", a.f) + } + + auxr[field] = aux[0] + auxa[field] = aux[0].ToRef() + } + + refNodes[i] = auxr + refRefs[i] = auxa + } + + var aux envoyx.NodeSet + for i, wf := range wrappedFilters { + switch wf.rt { + case types.WorkflowResourceType: + aux, err = d.decodeWorkflow(ctx, s, dl, d.makeWorkflowFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.TriggerResourceType: + aux, err = d.decodeTrigger(ctx, s, dl, d.makeTriggerFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + } + } + + return +} // // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource workflow +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeWorkflow(ctx context.Context, s store.Storer, dl dal.FullService, f types.WorkflowFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. 
+ // Currently, no resource is vast enough to pose a problem. + rr, _, err := store.SearchAutomationWorkflows(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.Handle, + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "CreatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.CreatedBy), + }, + // Handle references + "DeletedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.DeletedBy), + }, + // Handle references + "OwnedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.OwnedBy), + }, + // Handle references + "RunAs": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.RunAs), + }, + // Handle references + "UpdatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.UpdatedBy), + }, + } + + var scope envoyx.Scope + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.WorkflowResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +// Resource should define a custom filter builder + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource trigger +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeTrigger(ctx context.Context, s store.Storer, dl dal.FullService, f types.TriggerFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchAutomationTriggers(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "CreatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.CreatedBy), + }, + // Handle references + "DeletedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.DeletedBy), + }, + // Handle references + "OwnedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.OwnedBy), + }, + // Handle references + "UpdatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.UpdatedBy), + }, + // Handle references + "WorkflowID": envoyx.Ref{ + ResourceType: "corteza::automation:workflow", + Identifiers: envoyx.MakeIdentifiers(r.WorkflowID), + }, + } + + var scope envoyx.Scope + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.TriggerResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +// Resource should define a custom filter builder diff --git a/server/automation/envoy/store_decode.go b/server/automation/envoy/store_decode.go new file mode 100644 index 000000000..67d43d0d9 --- /dev/null +++ b/server/automation/envoy/store_decode.go @@ -0,0 +1,34 @@ +package envoy + +import ( + "github.com/cortezaproject/corteza/server/automation/types" + "github.com/cortezaproject/corteza/server/pkg/envoyx" +) + +func (d StoreDecoder) makeWorkflowFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.WorkflowFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.IdentsAsStrings() + _ = ids + _ = hh + + out.WorkflowID = ids + + if len(hh) > 0 { + out.Handle = hh[0] + } + + return +} + +func (d StoreDecoder) makeTriggerFilter(scope *envoyx.Node, refs 
map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.TriggerFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.TriggerID = ids + + return +} diff --git a/server/automation/envoy/store_encode.gen.go b/server/automation/envoy/store_encode.gen.go new file mode 100644 index 000000000..b315fe845 --- /dev/null +++ b/server/automation/envoy/store_encode.gen.go @@ -0,0 +1,465 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import ( + "context" + "fmt" + "strconv" + + "github.com/cortezaproject/corteza/server/automation/types" + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/pkg/id" + "github.com/cortezaproject/corteza/server/store" +) + +type ( + // StoreEncoder is responsible for encoding Corteza resources into the + // database via the Storer or the DAL interface + // + // @todo consider having a different encoder for the DAL resources + StoreEncoder struct{} +) + +// Prepare performs some initial processing on the resource before it can be encoded +// +// Preparation runs validation, default value initialization, matching with +// already existing instances, ... +// +// The prepare function receives a set of nodes grouped by the resource type. +// This enables some batching optimization and simplifications when it comes to +// matching with existing resources. +// +// Prepare does not receive any placeholder nodes which are used solely +// for dependency resolution. 
+func (e StoreEncoder) Prepare(ctx context.Context, p envoyx.EncodeParams, rt string, nn envoyx.NodeSet) (err error) { + s, err := e.grabStorer(p) + if err != nil { + return + } + + switch rt { + case types.WorkflowResourceType: + return e.prepareWorkflow(ctx, p, s, nn) + + case types.TriggerResourceType: + return e.prepareTrigger(ctx, p, s, nn) + } + + return +} + +// Encode encodes the given Corteza resources into the primary store +// +// Encoding should not do any additional processing apart from matching with +// dependencies and runtime validation +// +// The Encode function is called for every resource type where the resource +// appears at the root of the dependency tree. +// All of the root-level resources for that resource type are passed into the function. +// The encoding function must traverse the branches to encode all of the dependencies. +// +// This flow is used to simplify the flow of how resources are encoded into YAML +// (and other documents) as well as to simplify batching. +// +// Encode does not receive any placeholder nodes which are used solely +// for dependency resolution. 
+func (e StoreEncoder) Encode(ctx context.Context, p envoyx.EncodeParams, rt string, nodes envoyx.NodeSet, tree envoyx.Traverser) (err error) { + s, err := e.grabStorer(p) + if err != nil { + return + } + + switch rt { + case types.WorkflowResourceType: + return e.encodeWorkflows(ctx, p, s, nodes, tree) + + case types.TriggerResourceType: + return e.encodeTriggers(ctx, p, s, nodes, tree) + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource workflow +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareWorkflow prepares the resources of the given type for encoding +func (e StoreEncoder) prepareWorkflow(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. + + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.Workflow, len(nn)) + err = e.matchupWorkflows(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareWorkflow with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.Workflow) + if !ok { + panic("unexpected resource type: node expecting type of workflow") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. 
+ // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. + if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setWorkflowDefaults(res) + if err != nil { + return err + } + + err = e.validateWorkflow(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeWorkflows encodes a set of resource into the database +func (e StoreEncoder) encodeWorkflows(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeWorkflow(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeWorkflow encodes the resource into the database +func (e StoreEncoder) encodeWorkflow(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) 
+ if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertAutomationWorkflow(ctx, s, n.Resource.(*types.Workflow)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupWorkflows returns an index with indicates what resources already exist +func (e StoreEncoder) matchupWorkflows(ctx context.Context, s store.Storer, uu map[int]types.Workflow, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. + // Most resources won't really be that vast so this should be acceptable for now. + aa, _, err := store.SearchAutomationWorkflows(ctx, s, types.WorkflowFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.Workflow, len(aa)) + strMap := make(map[string]*types.Workflow, len(aa)) + + for _, a := range aa { + strMap[a.Handle] = a + idMap[a.ID] = a + + } + + var aux *types.Workflow + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource trigger +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareTrigger prepares the resources of the given type for encoding +func (e StoreEncoder) prepareTrigger(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type 
+ // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. + + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.Trigger, len(nn)) + err = e.matchupTriggers(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareTrigger with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.Trigger) + if !ok { + panic("unexpected resource type: node expecting type of trigger") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setTriggerDefaults(res) + if err != nil { + return err + } + + err = e.validateTrigger(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeTriggers encodes a set of resource into the database +func (e StoreEncoder) encodeTriggers(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeTrigger(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeTrigger encodes the resource into the database +func (e StoreEncoder) encodeTrigger(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertAutomationTrigger(ctx, s, n.Resource.(*types.Trigger)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupTriggers returns an index with indicates what resources already exist +func (e StoreEncoder) matchupTriggers(ctx context.Context, s store.Storer, uu map[int]types.Trigger, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. 
+ // Most resources won't really be that vast so this should be acceptable for now. + aa, _, err := store.SearchAutomationTriggers(ctx, s, types.TriggerFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.Trigger, len(aa)) + strMap := make(map[string]*types.Trigger, len(aa)) + + for _, a := range aa { + idMap[a.ID] = a + + } + + var aux *types.Trigger + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Utility functions +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e *StoreEncoder) grabStorer(p envoyx.EncodeParams) (s store.Storer, err error) { + auxs, ok := p.Params["storer"] + if !ok { + err = fmt.Errorf("storer not defined") + return + } + + s, ok = auxs.(store.Storer) + if !ok { + err = fmt.Errorf("invalid storer provided") + return + } + + return +} diff --git a/server/automation/envoy/store_encode.go b/server/automation/envoy/store_encode.go new file mode 100644 index 000000000..8b8a9a2aa --- /dev/null +++ b/server/automation/envoy/store_encode.go @@ -0,0 +1,19 @@ +package envoy + +import "github.com/cortezaproject/corteza/server/automation/types" + +func (e StoreEncoder) setWorkflowDefaults(res *types.Workflow) (err error) { + return +} + +func (e StoreEncoder) validateWorkflow(res *types.Workflow) (err error) { + return +} + +func (e StoreEncoder) setTriggerDefaults(res *types.Trigger) (err error) { + return +} + +func (e StoreEncoder) validateTrigger(res *types.Trigger) (err error) { + return +} diff --git a/server/automation/envoy/yaml_decode.gen.go 
b/server/automation/envoy/yaml_decode.gen.go new file mode 100644 index 000000000..1fefcd186 --- /dev/null +++ b/server/automation/envoy/yaml_decode.gen.go @@ -0,0 +1,804 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import ( + "context" + "fmt" + "io" + "strings" + + "github.com/cortezaproject/corteza/server/automation/types" + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/pkg/rbac" + "github.com/cortezaproject/corteza/server/pkg/y7s" + systemTypes "github.com/cortezaproject/corteza/server/system/types" + "golang.org/x/text/language" + "gopkg.in/yaml.v3" +) + +type ( + // YamlDecoder is responsible for decoding YAML documents into Corteza resources + // which are then managed by envoy and imported via an encoder. + YamlDecoder struct{} + documentContext struct { + references map[string]string + } + auxYamlDoc struct { + nodes envoyx.NodeSet + } +) + +// Decode returns a set of envoy nodes based on the provided params +// +// YamlDecoder expects the DecodeParam of `stream` which conforms +// to the io.Reader interface. 
+func (d YamlDecoder) Decode(ctx context.Context, p envoyx.DecodeParams) (out envoyx.NodeSet, err error) { + // Get the reader + r, err := d.getReader(ctx, p) + if err != nil { + return + } + + // Offload decoding to the aux document + doc := &auxYamlDoc{} + err = yaml.NewDecoder(r).Decode(doc) + if err != nil { + return + } + + return doc.nodes, nil +} + +func (d *auxYamlDoc) UnmarshalYAML(n *yaml.Node) (err error) { + // Get the document context from the root level + dctx, err := d.getDocumentContext(n) + if err != nil { + return + } + + var aux envoyx.NodeSet + return y7s.EachMap(n, func(k, v *yaml.Node) error { + kv := strings.ToLower(k.Value) + + switch kv { + case "workflow", "workflows": + if y7s.IsMapping(v) { + aux, err = d.unmarshalWorkflowMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalWorkflowSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "trigger": + if y7s.IsSeq(v) { + aux, err = d.unmarshalTriggerSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + // Access control nodes + case "allow": + aux, err = unmarshalAllowNode(v) + d.nodes = append(d.nodes, aux...) + if err != nil { + return err + } + + case "deny": + aux, err = unmarshalDenyNode(v) + d.nodes = append(d.nodes, aux...) + if err != nil { + return err + } + + // Resource translation nodes + case "locale", "translation", "translations", "i18n": + aux, err = unmarshalLocaleNode(v) + d.nodes = append(d.nodes, aux...) + if err != nil { + return err + } + + // Offload to custom handlers + default: + aux, err = d.unmarshalYAML(kv, v) + d.nodes = append(d.nodes, aux...) 
+ if err != nil { + return err + } + } + return nil + }) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource workflow +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalWorkflowSeq unmarshals Workflow when provided as a sequence node +func (d *auxYamlDoc) unmarshalWorkflowSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalWorkflowNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalWorkflowMap unmarshals Workflow when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalWorkflowMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalWorkflowNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalTriggersExtendedSeq unmarshals Triggers when provided as a sequence node +func (d *auxYamlDoc) unmarshalExtendedTriggersSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalTriggersExtendedNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalTriggersExtendedMap unmarshals Triggers when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. 
+// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalExtendedTriggersMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalTriggersExtendedNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalWorkflowNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalWorkflowNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.Workflow + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. 
+ ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Handle", &r.Handle) + ii = ii.Add(r.Handle) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "createdby": + // Handle references + err = y7s.DecodeScalar(n, "createdBy", &auxNodeValue) + if err != nil { + return err + } + refs["CreatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "deletedby": + // Handle references + err = y7s.DecodeScalar(n, "deletedBy", &auxNodeValue) + if err != nil { + return err + } + refs["DeletedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "handle": + // Handle identifiers + err = y7s.DecodeScalar(n, "handle", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "ownedby": + // Handle references + err = y7s.DecodeScalar(n, "ownedBy", &auxNodeValue) + if err != nil { + return err + } + refs["OwnedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "runas": + // Handle references + err = y7s.DecodeScalar(n, "runAs", &auxNodeValue) + if err != nil { + return err + } + refs["RunAs"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + 
case "updatedby":
+			// Handle references
+			err = y7s.DecodeScalar(n, "updatedBy", &auxNodeValue)
+			if err != nil {
+				return err
+			}
+			refs["UpdatedBy"] = envoyx.Ref{
+				ResourceType: "corteza::system:user",
+				Identifiers:  envoyx.MakeIdentifiers(auxNodeValue),
+			}
+
+			break
+
+		// Handle RBAC rules
+		case "allow":
+			auxOut, err = unmarshalAllowNode(n)
+			if err != nil {
+				return err
+			}
+			rbacNodes = append(rbacNodes, auxOut...)
+			auxOut = nil
+
+		case "deny":
+			auxOut, err = unmarshalDenyNode(n)
+			if err != nil {
+				return err
+			}
+			rbacNodes = append(rbacNodes, auxOut...)
+			auxOut = nil
+		}
+
+		return nil
+	})
+	if err != nil {
+		return
+	}
+
+	// Apply the scope to all of the references of the same type
+	for k, ref := range refs {
+		if ref.ResourceType != scope.ResourceType {
+			continue
+		}
+		ref.Scope = scope
+		refs[k] = ref
+	}
+
+	// Handle any resources that could be inserted under workflow such as a module inside a namespace
+	//
+	// This operation is done in the second pass of the document so we have
+	// the complete context of the current resource; such as the identifier,
+	// references, and scope.
+	err = y7s.EachMap(n, func(k, n *yaml.Node) error {
+		nestedNodes = nil
+
+		switch strings.ToLower(k.Value) {
+
+		case "triggers":
+			// Decode nested trigger definitions under the workflow key itself
+			if y7s.IsSeq(n) {
+				nestedNodes, err = d.unmarshalExtendedTriggersSeq(dctx, n)
+				if err != nil {
+					return err
+				}
+			}
+			break
+		}
+
+		// Iterate nested nodes and update their reference to the current resource
+		//
+		// Any reference to the parent resource from the child resource is overwritten
+		// to avoid potential user-error edge cases.
+		for _, a := range nestedNodes {
+			// @note all nested resources fall under the same component and the same scope.
+ // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["WorkflowID"] = envoyx.Ref{ + ResourceType: types.WorkflowResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.WorkflowResourceType, + Identifiers: ii, + References: refs, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["0"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource trigger +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalTriggerSeq unmarshals Trigger when provided as a sequence node +func (d *auxYamlDoc) unmarshalTriggerSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalTriggerNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) 
+ + return nil + }) + + return +} + +// unmarshalTriggerMap unmarshals Trigger when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +// @note this resource does not support map encoding. +// Refer to the corresponding definition files to adjust if needed. + +// unmarshalTriggerNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalTriggerNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.Trigger + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. 
+ ii := envoyx.Identifiers{} + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "createdby": + // Handle references + err = y7s.DecodeScalar(n, "createdBy", &auxNodeValue) + if err != nil { + return err + } + refs["CreatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "deletedby": + // Handle references + err = y7s.DecodeScalar(n, "deletedBy", &auxNodeValue) + if err != nil { + return err + } + refs["DeletedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "ownedby": + // Handle references + err = y7s.DecodeScalar(n, "ownedBy", &auxNodeValue) + if err != nil { + return err + } + refs["OwnedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "updatedby": + // Handle references + err = y7s.DecodeScalar(n, "updatedBy", &auxNodeValue) + if err != nil { + return err + } + refs["UpdatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "workflowid": + // Handle references + err = y7s.DecodeScalar(n, "workflowID", &auxNodeValue) + if err != nil { + return err + } + refs["WorkflowID"] = envoyx.Ref{ + ResourceType: "corteza::automation:workflow", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + } + + return nil + }) + if err != nil { + return + } + + // Apply the scope to all of the references of the same type + for k, ref 
:= range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under trigger such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["TriggerID"] = envoyx.Ref{ + ResourceType: types.TriggerResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.TriggerResourceType, + Identifiers: ii, + References: refs, + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) 
+ + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// RBAC unmarshal logic +// // // // // // // // // // // // // // // // // // // // // // // // // + +func unmarshalAllowNode(n *yaml.Node) (out envoyx.NodeSet, err error) { + return unmarshalRBACNode(n, rbac.Allow) +} + +func unmarshalDenyNode(n *yaml.Node) (out envoyx.NodeSet, err error) { + return unmarshalRBACNode(n, rbac.Deny) +} + +func unmarshalRBACNode(n *yaml.Node, acc rbac.Access) (out envoyx.NodeSet, err error) { + if y7s.IsMapping(n.Content[1]) { + return unmarshalNestedRBACNode(n, acc) + } + + return unmarshalFlatRBACNode(n, acc) +} + +// unmarshalNestedRBACNode handles RBAC rules when they are nested inside a resource +// +// The edge-case exists since the node doesn't explicitly specify the resource +// it belongs to. +// +// Example: +// +// modules: +// module1: +// name: "module 1" +// fields: ... +// allow: +// role1: +// - read +// - delete +func unmarshalNestedRBACNode(n *yaml.Node, acc rbac.Access) (out envoyx.NodeSet, err error) { + // Handles role + return out, y7s.EachMap(n, func(role, perm *yaml.Node) error { + // Handles operation + return y7s.EachMap(perm, func(res, op *yaml.Node) error { + out = append(out, &envoyx.Node{ + Resource: &rbac.Rule{ + Resource: res.Value, + Operation: op.Value, + Access: acc, + }, + ResourceType: rbac.RuleResourceType, + References: envoyx.MergeRefs( + map[string]envoyx.Ref{"RoleID": { + // Providing resource type as plain text to reduce cross component references + ResourceType: "corteza::system:role", + Identifiers: envoyx.MakeIdentifiers(role.Value), + }}, + envoyx.SplitResourceIdentifier(res.Value), + ), + }) + return nil + }) + }) +} + +// unmarshalFlatRBACNode handles RBAC rules when they are provided on the root level +// +// Example: +// +// allow: +// role1: +// corteza::system/: +// - users.search +// - users.create +func unmarshalFlatRBACNode(n *yaml.Node, acc rbac.Access) (out envoyx.NodeSet, err error) 
{ + return out, y7s.EachMap(n, func(role, op *yaml.Node) error { + out = append(out, &envoyx.Node{ + Resource: &rbac.Rule{ + Operation: op.Value, + Access: acc, + }, + ResourceType: rbac.RuleResourceType, + References: map[string]envoyx.Ref{ + "RoleID": { + // Providing resource type as plain text to reduce cross component references + ResourceType: "corteza::system:role", + Identifiers: envoyx.MakeIdentifiers(role.Value), + }, + }, + }) + return nil + }) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// i18n unmarshal logic +// // // // // // // // // // // // // // // // // // // // // // // // // + +func unmarshalLocaleNode(n *yaml.Node) (out envoyx.NodeSet, err error) { + return out, y7s.EachMap(n, func(lang, loc *yaml.Node) error { + langTag := systemTypes.Lang{Tag: language.Make(lang.Value)} + + return y7s.EachMap(loc, func(res, kv *yaml.Node) error { + return y7s.EachMap(kv, func(k, msg *yaml.Node) error { + out = append(out, &envoyx.Node{ + Resource: &systemTypes.ResourceTranslation{ + Lang: langTag, + K: k.Value, + Message: msg.Value, + }, + // Providing resource type as plain text to reduce cross component references + ResourceType: "corteza::system:resource-translation", + References: envoyx.SplitResourceIdentifier(res.Value), + }) + return nil + }) + }) + }) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Utilities +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d YamlDecoder) getReader(ctx context.Context, p envoyx.DecodeParams) (r io.Reader, err error) { + aux, ok := p.Params["stream"] + if ok { + r, ok = aux.(io.Reader) + if ok { + return + } + } + + // @todo consider adding support for managing files from a location + err = fmt.Errorf("YAML decoder expects a stream conforming to io.Reader interface") + return +} + +func (d *auxYamlDoc) getDocumentContext(n *yaml.Node) (dctx documentContext, err error) { + dctx = documentContext{ + references: 
make(map[string]string), + } + + err = y7s.EachMap(n, func(k, v *yaml.Node) error { + // @todo expand when needed. The previous implementation only supported + // namespaces on the root of the document. + + if y7s.IsKind(v, yaml.ScalarNode) { + dctx.references[k.Value] = v.Value + } + + return nil + }) + + return +} diff --git a/server/automation/envoy/yaml_decode.go b/server/automation/envoy/yaml_decode.go new file mode 100644 index 000000000..cd731f653 --- /dev/null +++ b/server/automation/envoy/yaml_decode.go @@ -0,0 +1,14 @@ +package envoy + +import ( + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "gopkg.in/yaml.v3" +) + +func (d *auxYamlDoc) unmarshalYAML(k string, n *yaml.Node) (out envoyx.NodeSet, err error) { + return +} + +func (d *auxYamlDoc) unmarshalTriggersExtendedNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + return d.unmarshalTriggerNode(dctx, n, meta...) +} diff --git a/server/automation/envoy/yaml_encode.gen.go b/server/automation/envoy/yaml_encode.gen.go new file mode 100644 index 000000000..6c19f7746 --- /dev/null +++ b/server/automation/envoy/yaml_encode.gen.go @@ -0,0 +1,327 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. 
+// + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/cortezaproject/corteza/server/automation/types" + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/pkg/y7s" + "gopkg.in/yaml.v3" +) + +type ( + // YamlEncoder is responsible for encoding Corteza resources into + // a YAML supported format + YamlEncoder struct{} +) + +// Encode encodes the given Corteza resources into some YAML supported format +// +// Encoding should not do any additional processing apart from matching with +// dependencies and runtime validation +// +// Preparation runs validation, default value initialization, matching with +// already existing instances, ... +// +// The prepare function receives a set of nodes grouped by the resource type. +// This enables some batching optimization and simplifications when it comes to +// matching with existing resources. +// +// Prepare does not receive any placeholder nodes which are used solely +// for dependency resolution. 
+func (e YamlEncoder) Encode(ctx context.Context, p envoyx.EncodeParams, rt string, nodes envoyx.NodeSet, tt envoyx.Traverser) (err error) { + var ( + out *yaml.Node + aux *yaml.Node + ) + _ = aux + + w, err := e.getWriter(p) + if err != nil { + return + } + + switch rt { + case types.WorkflowResourceType: + aux, err = e.encodeWorkflows(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "workflow", aux) + if err != nil { + return + } + + case types.TriggerResourceType: + aux, err = e.encodeTriggers(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "trigger", aux) + if err != nil { + return + } + } + + return yaml.NewEncoder(w).Encode(out) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource workflow +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeWorkflows(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeWorkflow(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeWorkflow focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeWorkflow(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.Workflow) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxCreatedBy, err := e.encodeRef(p, res.CreatedBy, "CreatedBy", node, tt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + 
if err != nil { + return + } + auxDeletedBy, err := e.encodeRef(p, res.DeletedBy, "DeletedBy", node, tt) + if err != nil { + return + } + + auxOwnedBy, err := e.encodeRef(p, res.OwnedBy, "OwnedBy", node, tt) + if err != nil { + return + } + + auxRunAs, err := e.encodeRef(p, res.RunAs, "RunAs", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + auxUpdatedBy, err := e.encodeRef(p, res.UpdatedBy, "UpdatedBy", node, tt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "createdAt", auxCreatedAt, + "createdBy", auxCreatedBy, + "deletedAt", auxDeletedAt, + "deletedBy", auxDeletedBy, + "enabled", res.Enabled, + "handle", res.Handle, + "id", res.ID, + "issues", res.Issues, + "keepSessions", res.KeepSessions, + "meta", res.Meta, + "ownedBy", auxOwnedBy, + "paths", res.Paths, + "runAs", auxRunAs, + "scope", res.Scope, + "steps", res.Steps, + "trace", res.Trace, + "updatedAt", auxUpdatedAt, + "updatedBy", auxUpdatedBy, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource trigger +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeTriggers(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeTrigger(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeTrigger focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeTrigger(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.Trigger) + + // Pre-compute some map values so 
we can omit error checking when encoding yaml nodes + + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxCreatedBy, err := e.encodeRef(p, res.CreatedBy, "CreatedBy", node, tt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + auxDeletedBy, err := e.encodeRef(p, res.DeletedBy, "DeletedBy", node, tt) + if err != nil { + return + } + + auxOwnedBy, err := e.encodeRef(p, res.OwnedBy, "OwnedBy", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + auxUpdatedBy, err := e.encodeRef(p, res.UpdatedBy, "UpdatedBy", node, tt) + if err != nil { + return + } + auxWorkflowID, err := e.encodeRef(p, res.WorkflowID, "WorkflowID", node, tt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "constraints", res.Constraints, + "createdAt", auxCreatedAt, + "createdBy", auxCreatedBy, + "deletedAt", auxDeletedAt, + "deletedBy", auxDeletedBy, + "enabled", res.Enabled, + "eventType", res.EventType, + "id", res.ID, + "input", res.Input, + "meta", res.Meta, + "ownedBy", auxOwnedBy, + "resourceType", res.ResourceType, + "stepID", res.StepID, + "updatedAt", auxUpdatedAt, + "updatedBy", auxUpdatedBy, + "workflowID", auxWorkflowID, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Encoding utils +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeTimestamp(p envoyx.EncodeParams, t time.Time) (any, error) { + if t.IsZero() { + return nil, nil + } + + tz := p.Config.PreferredTimezone + if tz != "" { + tzL, err := time.LoadLocation(tz) + if err != nil { + return nil, err + } + t = t.In(tzL) + } + + ly := p.Config.PreferredTimeLayout + if ly == "" { + ly = time.RFC3339 + } + + return 
t.Format(ly), nil
+}
+
+func (e YamlEncoder) encodeTimestampNil(p envoyx.EncodeParams, t *time.Time) (any, error) {
+	if t == nil {
+		return nil, nil
+	}
+
+	// @todo timestamp encoding format
+	return e.encodeTimestamp(p, *t)
+}
+
+func (e YamlEncoder) encodeRef(p envoyx.EncodeParams, id uint64, field string, node *envoyx.Node, tt envoyx.Traverser) (any, error) {
+	parent := tt.ParentForRef(node, node.References[field])
+
+	// @todo should we panic instead?
+	// for now gracefully fallback to the ID
+	if parent == nil {
+		return id, nil
+	}
+
+	return parent.Identifiers.FriendlyIdentifier(), nil
+}
+
+// // // // // // // // // // // // // // // // // // // // // // // // //
+// Utility functions
+// // // // // // // // // // // // // // // // // // // // // // // // //
+
+func (e YamlEncoder) getWriter(p envoyx.EncodeParams) (out io.Writer, err error) {
+	aux, ok := p.Params["writer"]
+	if ok {
+		out, ok = aux.(io.Writer)
+		if ok {
+			return
+		}
+	}
+
+	// @todo consider adding support for managing files from a location
+	err = fmt.Errorf("YAML encoder expects a writer conforming to io.Writer interface")
+	return
+}
diff --git a/server/automation/session.cue b/server/automation/session.cue
index 947d325fe..d31fce6f2 100644
--- a/server/automation/session.cue
+++ b/server/automation/session.cue
@@ -80,6 +80,10 @@ session: {
 		}
 	}
 
+	envoy: {
+		omit: true
+	}
+
 	filter: {
 		struct: {
 			session_id: { goType: "[]uint64", storeIdent: "id", ident: "sessionID" }
diff --git a/server/automation/trigger.cue b/server/automation/trigger.cue
index e31c1a85a..262ad0b8f 100644
--- a/server/automation/trigger.cue
+++ b/server/automation/trigger.cue
@@ -70,6 +70,16 @@ trigger: {
 		}
 	}
 
+	envoy: {
+		yaml: {
+			supportMappedInput: false
+			identKeyAlias: []
+		}
+		store: {
+			customFilterBuilder: true
+		}
+	}
+
 	filter: {
 		struct: {
 			deleted: { goType: "filter.State", storeIdent: "deleted_at" }
diff --git a/server/automation/types/workflow.go b/server/automation/types/workflow.go
index 
88c21f2c0..be6916a99 100644 --- a/server/automation/types/workflow.go +++ b/server/automation/types/workflow.go @@ -4,9 +4,10 @@ import ( "database/sql/driver" "encoding/json" "fmt" - "github.com/cortezaproject/corteza/server/pkg/sql" "time" + "github.com/cortezaproject/corteza/server/pkg/sql" + "github.com/cortezaproject/corteza/server/pkg/expr" "github.com/cortezaproject/corteza/server/pkg/filter" ) @@ -48,6 +49,8 @@ type ( WorkflowFilter struct { WorkflowID []string `json:"workflowID"` + Handle string `json:"handle"` + Query string `json:"query"` Deleted filter.State `json:"deleted"` diff --git a/server/automation/workflow.cue b/server/automation/workflow.cue index f90346110..9cfe776a1 100644 --- a/server/automation/workflow.cue +++ b/server/automation/workflow.cue @@ -70,16 +70,34 @@ workflow: { } } + envoy: { + yaml: { + supportMappedInput: true + mappedField: "Handle" + identKeyAlias: ["workflows"] + extendedResourceDecoders: [{ + ident: "triggers" + expIdent: "Triggers" + supportMappedInput: false + identKeys: ["triggers"] + }] + } + store: { + customFilterBuilder: true + } + } + filter: { struct: { workflow_id: { goType: "[]string", ident: "workflowID", storeIdent: "id" } + handle: { goType: "string" } sub_workflow: { goType: "filter.State" } deleted: { goType: "filter.State", storeIdent: "deleted_at" } disabled: { goType: "filter.State", storeIdent: "enabled" } } query: ["handle"] - byValue: ["workflow_id"] + byValue: ["workflow_id", "handle"] byNilState: ["deleted"] byFalseState: ["disabled"] } diff --git a/server/codegen/assets/templates/gocode/envoy/store_decode.go.tpl b/server/codegen/assets/templates/gocode/envoy/store_decode.go.tpl index 288dc3953..9241eeccd 100644 --- a/server/codegen/assets/templates/gocode/envoy/store_decode.go.tpl +++ b/server/codegen/assets/templates/gocode/envoy/store_decode.go.tpl @@ -57,6 +57,7 @@ func (d StoreDecoder) decode(ctx context.Context, s store.Storer, dl dal.FullSer // Get all requested scopes scopedNodes := 
make(envoyx.NodeSet, len(p.Filter)) + {{ if eq .componentIdent "compose" }} for i, a := range wrappedFilters { if a.f.Scope.ResourceType == "" { continue @@ -64,7 +65,7 @@ func (d StoreDecoder) decode(ctx context.Context, s store.Storer, dl dal.FullSer // For now the scope can only point to namespace so this will do var nn envoyx.NodeSet - nn, err = d.decodeNamespace(ctx, s, dl, d.identToNamespaceFilter(a.f.Scope.Identifiers)) + nn, err = d.decodeNamespace(ctx, s, dl, d.makeNamespaceFilter(nil, nil, envoyx.ResourceFilter{Identifiers: a.f.Scope.Identifiers})) if err != nil { return } @@ -79,6 +80,10 @@ func (d StoreDecoder) decode(ctx context.Context, s store.Storer, dl dal.FullSer scopedNodes[i] = nn[0] } + {{ else }} + // @note skipping scope logic since it's currently only supported within + // Compose resources. + {{ end }} // Get all requested references // @@ -121,7 +126,7 @@ func (d StoreDecoder) decode(ctx context.Context, s store.Storer, dl dal.FullSer for i, wf := range wrappedFilters { switch wf.rt { {{- range .resources -}} - {{- if or .envoy.omit (not .envoy.use)}}{{continue}}{{ end -}} + {{- if .envoy.omit}}{{continue}}{{ end -}} case types.{{.expIdent}}ResourceType: aux, err = d.decode{{.expIdent}}(ctx, s, dl, d.make{{.expIdent}}Filter(scopedNodes[i], refNodes[i], wf.f)) @@ -142,7 +147,7 @@ func (d StoreDecoder) decode(ctx context.Context, s store.Storer, dl dal.FullSer } {{- range .resources }} - {{- if or .envoy.omit (not .envoy.use)}} + {{- if .envoy.omit}} {{continue}} {{ end -}} @@ -248,6 +253,7 @@ func (d StoreDecoder) make{{.expIdent}}Filter(scope *envoyx.Node, refs map[strin _ = ar _ = ok {{ range .model.attributes }} + {{- if .envoy.store.omitRefFilter }}{{continue}}{{ end }} {{ if eq .dal.type "Ref" }} ar, ok = refs["{{ .expIdent }}"] if ok { diff --git a/server/codegen/assets/templates/gocode/envoy/store_encode.go.tpl b/server/codegen/assets/templates/gocode/envoy/store_encode.go.tpl index fb2788bd7..4c9b52d2c 100644 --- 
a/server/codegen/assets/templates/gocode/envoy/store_encode.go.tpl +++ b/server/codegen/assets/templates/gocode/envoy/store_encode.go.tpl @@ -46,7 +46,7 @@ func (e StoreEncoder) Prepare(ctx context.Context, p envoyx.EncodeParams, rt str switch rt { {{- range .resources }} - {{- if or .envoy.omit (not .envoy.use)}} + {{- if .envoy.omit}} {{continue}} {{end -}} @@ -81,7 +81,7 @@ func (e StoreEncoder) Encode(ctx context.Context, p envoyx.EncodeParams, rt stri switch rt { {{- range .resources }} -{{- if or .envoy.omit (not .envoy.use) -}} +{{- if .envoy.omit -}} {{continue}} {{end}} case types.{{.expIdent}}ResourceType: @@ -93,7 +93,7 @@ func (e StoreEncoder) Encode(ctx context.Context, p envoyx.EncodeParams, rt stri } {{- range .resources }} - {{- if or .envoy.omit (not .envoy.use)}} + {{- if .envoy.omit}} {{continue}} {{end}} diff --git a/server/codegen/assets/templates/gocode/envoy/yaml_decode.go.tpl b/server/codegen/assets/templates/gocode/envoy/yaml_decode.go.tpl index 0e8ef10a1..2ee40b1b9 100644 --- a/server/codegen/assets/templates/gocode/envoy/yaml_decode.go.tpl +++ b/server/codegen/assets/templates/gocode/envoy/yaml_decode.go.tpl @@ -68,7 +68,7 @@ func (d *auxYamlDoc) UnmarshalYAML(n *yaml.Node) (err error) { switch kv { {{- range .resources -}} - {{- if or .envoy.omit (not .envoy.use) -}} + {{- if .envoy.omit -}} {{continue}} {{- end -}} @@ -127,7 +127,7 @@ func (d *auxYamlDoc) UnmarshalYAML(n *yaml.Node) (err error) { {{ $rootRes := .resources }} {{- range .resources }} - {{- if or .envoy.omit (not .envoy.use)}} + {{- if .envoy.omit}} {{continue}} {{ end -}} @@ -175,6 +175,43 @@ func (d *auxYamlDoc) unmarshal{{ .expIdent }}Map(dctx documentContext, n *yaml.N } {{ end }} +{{ range .envoy.yaml.extendedResourceDecoders -}} +// unmarshal{{.expIdent}}ExtendedSeq unmarshals {{.expIdent}} when provided as a sequence node +func (d *auxYamlDoc) unmarshalExtended{{.expIdent}}Seq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux 
envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshal{{ .expIdent }}ExtendedNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshal{{.expIdent}}ExtendedMap unmarshals {{.expIdent}} when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalExtended{{ .expIdent }}Map(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshal{{ .expIdent }}ExtendedNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} +{{ end }} + // unmarshal{{ .expIdent }}Node is a cookie-cutter function to unmarshal // the yaml node into the corresponding Corteza type & Node func (d *auxYamlDoc) unmarshal{{ .expIdent }}Node(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { @@ -425,6 +462,26 @@ func (d *auxYamlDoc) unmarshal{{ .expIdent }}Node(dctx documentContext, n *yaml. 
{{break}} {{- end }} {{- end -}} + + {{- range .envoy.yaml.extendedResourceDecoders }} + {{ $identKeys := .identKeys }} + case {{ range $i, $l := $identKeys -}} + "{{ $l }}"{{if not (eq $i (sub (len $identKeys) 1))}},{{end}} + {{- end}}: + default: + if y7s.IsSeq(n) { + nestedNodes, err = d.unmarshalExtended{{.expIdent}}Seq(dctx, n) + if err != nil { + return err + } + } {{- if .supportMappedInput }} else { + nestedNodes, err = d.unmarshalExtended{{.expIdent}}Map(dctx, n) + if err != nil { + return err + } + }{{ end }} + break + {{ end -}} } // Iterate nested nodes and update their reference to the current resource diff --git a/server/codegen/assets/templates/gocode/envoy/yaml_encode.go.tpl b/server/codegen/assets/templates/gocode/envoy/yaml_encode.go.tpl index 9a8f2d337..1d90a80cb 100644 --- a/server/codegen/assets/templates/gocode/envoy/yaml_encode.go.tpl +++ b/server/codegen/assets/templates/gocode/envoy/yaml_encode.go.tpl @@ -51,7 +51,7 @@ func (e YamlEncoder) Encode(ctx context.Context, p envoyx.EncodeParams, rt strin switch rt { {{- range .resources }} - {{- if or .envoy.omit (not .envoy.use)}} + {{- if .envoy.omit}} {{continue}} {{ end -}} @@ -74,7 +74,7 @@ func (e YamlEncoder) Encode(ctx context.Context, p envoyx.EncodeParams, rt strin {{ $rootRes := .resources }} {{- range .resources }} - {{- if or .envoy.omit (not .envoy.use)}} + {{- if .envoy.omit}} {{continue}} {{ end -}} diff --git a/server/codegen/schema/model.cue b/server/codegen/schema/model.cue index 3234b15a9..d993843d2 100644 --- a/server/codegen/schema/model.cue +++ b/server/codegen/schema/model.cue @@ -107,6 +107,8 @@ import ( // defines a custom field identifier when constructing // resource filters and assigning reference constraints filterRefField: string | *"" + + omitRefFilter: bool | *false } } @@ -224,6 +226,11 @@ HandleField: { AttributeUserRef: { goType: "uint64" dal: { type: "Ref", refModelResType: "corteza::system:user", default: 0 } + envoy: { + store: { + omitRefFilter: true + } + 
} } SortableTimestampField: { diff --git a/server/codegen/schema/resource.cue b/server/codegen/schema/resource.cue index 7055feda6..8d5413168 100644 --- a/server/codegen/schema/resource.cue +++ b/server/codegen/schema/resource.cue @@ -90,10 +90,7 @@ import ( } envoy?: #resourceEnvoy & { - // @todo temporary; easier development on less resources - use: bool | *false omit: bool | *false - $resourceIdent: ident } @@ -147,8 +144,6 @@ import ( #resourceEnvoy: { $resourceIdent: string - // @todo remove use, temporary for now - use: bool omit: bool // Scoped resources prioritize matching with resources in the same scope. @@ -174,13 +169,22 @@ import ( supportMappedInput: bool | *true // mappedField controls what identifier the map key represents // @todo this can probably be inferred so consider removing it. - mappedField: string + mappedField: string | *"" identKeyLabel: string | *strings.ToLower($resourceIdent) identKeyAlias: [...string] | *[] // identKeys defines all of the identifiers that can be used when // referencing this resource identKeys: [...string] | *([identKeyLabel]+identKeyAlias) + + extendedResourceDecoders: [...{ + ident: string + expIdent: string + identKeys: [...string] + + supportMappedInput: bool | *true + mappedField: string | *"" + }] | *[] } // store decode/encode configs diff --git a/server/compose/attachment.cue b/server/compose/attachment.cue index 55c1bb938..8f99d88fb 100644 --- a/server/compose/attachment.cue +++ b/server/compose/attachment.cue @@ -70,6 +70,10 @@ attachment: { byValue: ["kind", "namespace_id"] } + envoy: { + omit: true + } + store: { ident: "composeAttachment" diff --git a/server/compose/chart.cue b/server/compose/chart.cue index 316c55275..eef6bb45c 100644 --- a/server/compose/chart.cue +++ b/server/compose/chart.cue @@ -19,6 +19,11 @@ chart: { goType: "uint64", storeIdent: "rel_namespace" dal: { type: "Ref", refModelResType: "corteza::compose:namespace" } + envoy: { + yaml: { + identKeyAlias: ["namespace", "namespace_id", 
"ns"] + } + } } name: { sortable: true @@ -29,6 +34,11 @@ chart: { dal: {} omitSetter: true omitGetter: true + envoy: { + yaml: { + customDecoder: true + } + } } created_at: schema.SortableTimestampNowField updated_at: schema.SortableTimestampNilField @@ -63,6 +73,18 @@ chart: { byNilState: ["deleted"] } + envoy: { + scoped: true + yaml: { + supportMappedInput: true + mappedField: "Handle" + identKeyAlias: ["charts", "chrt"] + } + store: { + extendedRefDecoder: true + } + } + rbac: { operations: { "read": {} diff --git a/server/compose/envoy/store_decode.gen.go b/server/compose/envoy/store_decode.gen.go new file mode 100644 index 000000000..3492650a8 --- /dev/null +++ b/server/compose/envoy/store_decode.gen.go @@ -0,0 +1,573 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import ( + "context" + "fmt" + + "github.com/cortezaproject/corteza/server/compose/types" + "github.com/cortezaproject/corteza/server/pkg/dal" + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/store" +) + +type ( + // StoreDecoder is responsible for fetching already stored Corteza resources + // which are then managed by envoy and imported via an encoder. + StoreDecoder struct{} +) + +// Decode returns a set of envoy nodes based on the provided params +// +// StoreDecoder expects the DecodeParam of `storer` and `dal` which conform +// to the store.Storer and dal.FullService interfaces. 
+func (d StoreDecoder) Decode(ctx context.Context, p envoyx.DecodeParams) (out envoyx.NodeSet, err error) { + var ( + s store.Storer + dl dal.FullService + ) + + // @todo we can optionally not require them based on what we're doing + if auxS, ok := p.Params["storer"]; ok { + s = auxS.(store.Storer) + } + if auxDl, ok := p.Params["dal"]; ok { + dl = auxDl.(dal.FullService) + } + + return d.decode(ctx, s, dl, p) +} + +func (d StoreDecoder) decode(ctx context.Context, s store.Storer, dl dal.FullService, p envoyx.DecodeParams) (out envoyx.NodeSet, err error) { + // Transform passed filters into an ordered structure + type ( + filterWrap struct { + rt string + f envoyx.ResourceFilter + } + ) + wrappedFilters := make([]filterWrap, 0, len(p.Filter)) + for rt, f := range p.Filter { + wrappedFilters = append(wrappedFilters, filterWrap{rt: rt, f: f}) + } + + // Get all requested scopes + scopedNodes := make(envoyx.NodeSet, len(p.Filter)) + + for i, a := range wrappedFilters { + if a.f.Scope.ResourceType == "" { + continue + } + + // For now the scope can only point to namespace so this will do + var nn envoyx.NodeSet + nn, err = d.decodeNamespace(ctx, s, dl, d.makeNamespaceFilter(nil, nil, envoyx.ResourceFilter{Identifiers: a.f.Scope.Identifiers})) + if err != nil { + return + } + if len(nn) > 1 { + err = fmt.Errorf("ambiguous scope %v", a.f.Scope) + return + } + if len(nn) == 0 { + err = fmt.Errorf("invalid scope: resource not found %v", a.f) + return + } + + scopedNodes[i] = nn[0] + } + + // Get all requested references + // + // Keep an index for the Node and one for the reference to make our + // lives easier. 
+ refNodes := make([]map[string]*envoyx.Node, len(p.Filter)) + refRefs := make([]map[string]envoyx.Ref, len(p.Filter)) + for i, a := range wrappedFilters { + if len(a.f.Refs) == 0 { + continue + } + + auxr := make(map[string]*envoyx.Node, len(a.f.Refs)) + auxa := make(map[string]envoyx.Ref) + for field, ref := range a.f.Refs { + f := ref.ResourceFilter() + aux, err := d.decode(ctx, s, dl, envoyx.DecodeParams{ + Type: envoyx.DecodeTypeStore, + Filter: f, + }) + if err != nil { + return nil, err + } + if len(aux) == 0 { + return nil, fmt.Errorf("invalid reference %v", ref) + } + if len(aux) > 1 { + return nil, fmt.Errorf("ambiguous reference: too many resources returned %v", a.f) + } + + auxr[field] = aux[0] + auxa[field] = aux[0].ToRef() + } + + refNodes[i] = auxr + refRefs[i] = auxa + } + + var aux envoyx.NodeSet + for i, wf := range wrappedFilters { + switch wf.rt { + case types.ChartResourceType: + aux, err = d.decodeChart(ctx, s, dl, d.makeChartFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.ModuleResourceType: + aux, err = d.decodeModule(ctx, s, dl, d.makeModuleFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.ModuleFieldResourceType: + aux, err = d.decodeModuleField(ctx, s, dl, d.makeModuleFieldFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) 
+ + case types.NamespaceResourceType: + aux, err = d.decodeNamespace(ctx, s, dl, d.makeNamespaceFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.PageResourceType: + aux, err = d.decodePage(ctx, s, dl, d.makePageFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource chart +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeChart(ctx context.Context, s store.Storer, dl dal.FullService, f types.ChartFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchComposeCharts(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.Handle, + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "NamespaceID": envoyx.Ref{ + ResourceType: "corteza::compose:namespace", + Identifiers: envoyx.MakeIdentifiers(r.NamespaceID), + }, + } + + refs = envoyx.MergeRefs(refs, d.decodeChartRefs(r)) + + var scope envoyx.Scope + + scope = envoyx.Scope{ + ResourceType: refs["NamespaceID"].ResourceType, + Identifiers: refs["NamespaceID"].Identifiers, + } + for k, ref := range refs { + ref.Scope = scope + refs[k] = ref + } + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.ChartResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeChartFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.ChartFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.ChartID = ids + + if len(hh) > 0 { + out.Handle = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + ar, ok = refs["NamespaceID"] + if ok { + out.NamespaceID = ar.Resource.GetID() + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource module +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeModule(ctx context.Context, s store.Storer, dl dal.FullService, f types.ModuleFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchComposeModules(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.Handle, + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "NamespaceID": envoyx.Ref{ + ResourceType: "corteza::compose:namespace", + Identifiers: envoyx.MakeIdentifiers(r.NamespaceID), + }, + } + + var scope envoyx.Scope + + scope = envoyx.Scope{ + ResourceType: refs["NamespaceID"].ResourceType, + Identifiers: refs["NamespaceID"].Identifiers, + } + for k, ref := range refs { + ref.Scope = scope + refs[k] = ref + } + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.ModuleResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + aux, err := d.extendedModuleDecoder(ctx, s, dl, f, out) + if err != nil { + return + } + out = append(out, aux...) + + return +} + +func (d StoreDecoder) makeModuleFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.ModuleFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.ModuleID = ids + + if len(hh) > 0 { + out.Handle = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + ar, ok = refs["NamespaceID"] + if ok { + out.NamespaceID = ar.Resource.GetID() + } + + out = d.extendModuleFilter(scope, refs, auxf, out) + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource moduleField +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeModuleField(ctx context.Context, s store.Storer, dl dal.FullService, f types.ModuleFieldFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchComposeModuleFields(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.ID, + r.Name, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "ModuleID": envoyx.Ref{ + ResourceType: "corteza::compose:module", + Identifiers: envoyx.MakeIdentifiers(r.ModuleID), + }, + } + + var scope envoyx.Scope + + scope = envoyx.Scope{ + ResourceType: refs["NamespaceID"].ResourceType, + Identifiers: refs["NamespaceID"].Identifiers, + } + for k, ref := range refs { + ref.Scope = scope + refs[k] = ref + } + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.ModuleFieldResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +// Resource should define a custom filter builder + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource namespace +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeNamespace(ctx context.Context, s store.Storer, dl dal.FullService, f types.NamespaceFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchComposeNamespaces(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.ID, + r.Slug, + ) + + refs := map[string]envoyx.Ref{} + + var scope envoyx.Scope + + scope = envoyx.Scope{ + ResourceType: types.NamespaceResourceType, + Identifiers: ii, + } + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.NamespaceResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeNamespaceFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.NamespaceFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.NamespaceID = ids + + if len(hh) > 0 { + out.Slug = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + out = d.extendNamespaceFilter(scope, refs, auxf, out) + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource page +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodePage(ctx context.Context, s store.Storer, dl dal.FullService, f types.PageFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchComposePages(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.Handle, + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "ModuleID": envoyx.Ref{ + ResourceType: "corteza::compose:module", + Identifiers: envoyx.MakeIdentifiers(r.ModuleID), + }, + // Handle references + "NamespaceID": envoyx.Ref{ + ResourceType: "corteza::compose:namespace", + Identifiers: envoyx.MakeIdentifiers(r.NamespaceID), + }, + // Handle references + "SelfID": envoyx.Ref{ + ResourceType: "corteza::compose:page", + Identifiers: envoyx.MakeIdentifiers(r.SelfID), + }, + } + + var scope envoyx.Scope + + scope = envoyx.Scope{ + ResourceType: refs["NamespaceID"].ResourceType, + Identifiers: refs["NamespaceID"].Identifiers, + } + for k, ref := range refs { + ref.Scope = scope + refs[k] = ref + } + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.PageResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makePageFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.PageFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.PageID = ids + + if len(hh) > 0 { + out.Handle = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + ar, ok = refs["ModuleID"] + if ok { + out.ModuleID = ar.Resource.GetID() + } + + ar, ok = refs["NamespaceID"] + if ok { + out.NamespaceID = ar.Resource.GetID() + } + + ar, ok = refs["SelfID"] + if ok { + out.ParentID = ar.Resource.GetID() + } + + out = d.extendPageFilter(scope, refs, auxf, out) + return +} diff --git a/server/compose/envoy/store_decode.go b/server/compose/envoy/store_decode.go new file mode 100644 index 000000000..0e45aaedc --- /dev/null +++ b/server/compose/envoy/store_decode.go @@ -0,0 +1,117 @@ +package envoy + +import ( + "context" + + 
"github.com/cortezaproject/corteza/server/compose/types" + "github.com/cortezaproject/corteza/server/pkg/dal" + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/store" +) + +func (d StoreDecoder) extendNamespaceFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter, base types.NamespaceFilter) (out types.NamespaceFilter) { + out = base + + if scope == nil { + return + } + + if scope.ResourceType == "" { + return + } + + // Overwrite it + out.NamespaceID = []uint64{scope.Resource.GetID()} + + return +} + +func (d StoreDecoder) extendModuleFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter, base types.ModuleFilter) (out types.ModuleFilter) { + out = base + + if scope == nil { + return + } + + if scope.ResourceType == "" { + return + } + + // Overwrite it + out.NamespaceID = scope.Resource.GetID() + + return +} + +func (d StoreDecoder) extendPageFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter, base types.PageFilter) (out types.PageFilter) { + out = base + + if scope == nil { + return + } + + if scope.ResourceType == "" { + return + } + + // Overwrite it + out.NamespaceID = scope.Resource.GetID() + + return +} + +func (d StoreDecoder) makeModuleFieldFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.ModuleFieldFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + // ar, ok = refs["ModuleID"] + // if ok { + // out.ModuleID = ar.Resource.GetID() + // } + + return +} + +func (d StoreDecoder) extendedModuleDecoder(ctx context.Context, s store.Storer, dl dal.FullService, f types.ModuleFilter, base envoyx.NodeSet) (out envoyx.NodeSet, err error) { + var ff types.ModuleFieldSet + + for _, b := range base { + ff, _, err = store.SearchComposeModuleFields(ctx, s, 
types.ModuleFieldFilter{ModuleID: []uint64{b.Resource.GetID()}}) + if err != nil { + return + } + + // No need to assign them under the module since we're working with nodes now + for _, f := range ff { + out = append(out, &envoyx.Node{ + Resource: f, + + ResourceType: types.ModuleFieldResourceType, + Identifiers: envoyx.MakeIdentifiers(f.ID, f.Name), + References: envoyx.MergeRefs(b.References, map[string]envoyx.Ref{ + "ModuleID": b.ToRef(), + }), + Scope: b.Scope, + }) + } + } + + return +} + +func (d StoreDecoder) decodeChartRefs(c *types.Chart) (refs map[string]envoyx.Ref) { + + // @todo + return +} diff --git a/server/compose/envoy/store_encode.gen.go b/server/compose/envoy/store_encode.gen.go new file mode 100644 index 000000000..172d5b021 --- /dev/null +++ b/server/compose/envoy/store_encode.gen.go @@ -0,0 +1,1021 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import ( + "context" + "fmt" + "strconv" + + "github.com/cortezaproject/corteza/server/compose/types" + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/pkg/id" + "github.com/cortezaproject/corteza/server/store" +) + +type ( + // StoreEncoder is responsible for encoding Corteza resources into the + // database via the Storer or the DAL interface + // + // @todo consider having a different encoder for the DAL resources + StoreEncoder struct{} +) + +// Prepare performs some initial processing on the resource before it can be encoded +// +// Preparation runs validation, default value initialization, matching with +// already existing instances, ... +// +// The prepare function receives a set of nodes grouped by the resource type. +// This enables some batching optimization and simplifications when it comes to +// matching with existing resources. 
+// +// Prepare does not receive any placeholder nodes which are used solely +// for dependency resolution. +func (e StoreEncoder) Prepare(ctx context.Context, p envoyx.EncodeParams, rt string, nn envoyx.NodeSet) (err error) { + s, err := e.grabStorer(p) + if err != nil { + return + } + + switch rt { + case types.ChartResourceType: + return e.prepareChart(ctx, p, s, nn) + case types.ModuleResourceType: + return e.prepareModule(ctx, p, s, nn) + case types.ModuleFieldResourceType: + return e.prepareModuleField(ctx, p, s, nn) + case types.NamespaceResourceType: + return e.prepareNamespace(ctx, p, s, nn) + case types.PageResourceType: + return e.preparePage(ctx, p, s, nn) + + } + + return +} + +// Encode encodes the given Corteza resources into the primary store +// +// Encoding should not do any additional processing apart from matching with +// dependencies and runtime validation +// +// The Encode function is called for every resource type where the resource +// appears at the root of the dependency tree. +// All of the root-level resources for that resource type are passed into the function. +// The encoding function must traverse the branches to encode all of the dependencies. +// +// This flow is used to simplify the flow of how resources are encoded into YAML +// (and other documents) as well as to simplify batching. +// +// Encode does not receive any placeholder nodes which are used solely +// for dependency resolution. 
+func (e StoreEncoder) Encode(ctx context.Context, p envoyx.EncodeParams, rt string, nodes envoyx.NodeSet, tree envoyx.Traverser) (err error) { + s, err := e.grabStorer(p) + if err != nil { + return + } + + switch rt { + case types.ChartResourceType: + return e.encodeCharts(ctx, p, s, nodes, tree) + + case types.ModuleResourceType: + return e.encodeModules(ctx, p, s, nodes, tree) + + case types.ModuleFieldResourceType: + return e.encodeModuleFields(ctx, p, s, nodes, tree) + + case types.NamespaceResourceType: + return e.encodeNamespaces(ctx, p, s, nodes, tree) + + case types.PageResourceType: + return e.encodePages(ctx, p, s, nodes, tree) + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource chart +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareChart prepares the resources of the given type for encoding +func (e StoreEncoder) prepareChart(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.Chart, len(nn)) + err = e.matchupCharts(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareChart with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.Chart) + if !ok { + panic("unexpected resource type: node expecting type of chart") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setChartDefaults(res) + if err != nil { + return err + } + + err = e.validateChart(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeCharts encodes a set of resource into the database +func (e StoreEncoder) encodeCharts(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeChart(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeChart encodes the resource into the database +func (e StoreEncoder) encodeChart(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertComposeChart(ctx, s, n.Resource.(*types.Chart)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupCharts returns an index with indicates what resources already exist +func (e StoreEncoder) matchupCharts(ctx context.Context, s store.Storer, uu map[int]types.Chart, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. + // Most resources won't really be that vast so this should be acceptable for now. 
+ aa, _, err := store.SearchComposeCharts(ctx, s, types.ChartFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.Chart, len(aa)) + strMap := make(map[string]*types.Chart, len(aa)) + + for _, a := range aa { + strMap[a.Handle] = a + idMap[a.ID] = a + + } + + var aux *types.Chart + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource module +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareModule prepares the resources of the given type for encoding +func (e StoreEncoder) prepareModule(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.Module, len(nn)) + err = e.matchupModules(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareModule with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.Module) + if !ok { + panic("unexpected resource type: node expecting type of module") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setModuleDefaults(res) + if err != nil { + return err + } + + err = e.validateModule(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeModules encodes a set of resource into the database +func (e StoreEncoder) encodeModules(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeModule(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeModule encodes the resource into the database +func (e StoreEncoder) encodeModule(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertComposeModule(ctx, s, n.Resource.(*types.Module)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupModules returns an index with indicates what resources already exist +func (e StoreEncoder) matchupModules(ctx context.Context, s store.Storer, uu map[int]types.Module, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. + // Most resources won't really be that vast so this should be acceptable for now. 
+ aa, _, err := store.SearchComposeModules(ctx, s, types.ModuleFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.Module, len(aa)) + strMap := make(map[string]*types.Module, len(aa)) + + for _, a := range aa { + strMap[a.Handle] = a + idMap[a.ID] = a + + } + + var aux *types.Module + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource moduleField +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareModuleField prepares the resources of the given type for encoding +func (e StoreEncoder) prepareModuleField(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.ModuleField, len(nn)) + err = e.matchupModuleFields(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareModuleField with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.ModuleField) + if !ok { + panic("unexpected resource type: node expecting type of moduleField") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setModuleFieldDefaults(res) + if err != nil { + return err + } + + err = e.validateModuleField(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeModuleFields encodes a set of resource into the database +func (e StoreEncoder) encodeModuleFields(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeModuleField(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeModuleField encodes the resource into the database +func (e StoreEncoder) encodeModuleField(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertComposeModuleField(ctx, s, n.Resource.(*types.ModuleField)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupModuleFields returns an index with indicates what resources already exist +func (e StoreEncoder) matchupModuleFields(ctx context.Context, s store.Storer, uu map[int]types.ModuleField, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. 
+ // Most resources won't really be that vast so this should be acceptable for now. + aa, _, err := store.SearchComposeModuleFields(ctx, s, types.ModuleFieldFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.ModuleField, len(aa)) + strMap := make(map[string]*types.ModuleField, len(aa)) + + for _, a := range aa { + idMap[a.ID] = a + strMap[a.Name] = a + + } + + var aux *types.ModuleField + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource namespace +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareNamespace prepares the resources of the given type for encoding +func (e StoreEncoder) prepareNamespace(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.Namespace, len(nn)) + err = e.matchupNamespaces(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareNamespace with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.Namespace) + if !ok { + panic("unexpected resource type: node expecting type of namespace") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setNamespaceDefaults(res) + if err != nil { + return err + } + + err = e.validateNamespace(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeNamespaces encodes a set of resource into the database +func (e StoreEncoder) encodeNamespaces(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeNamespace(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeNamespace encodes the resource into the database +func (e StoreEncoder) encodeNamespace(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertComposeNamespace(ctx, s, n.Resource.(*types.Namespace)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupNamespaces returns an index with indicates what resources already exist +func (e StoreEncoder) matchupNamespaces(ctx context.Context, s store.Storer, uu map[int]types.Namespace, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. 
+ // Most resources won't really be that vast so this should be acceptable for now. + aa, _, err := store.SearchComposeNamespaces(ctx, s, types.NamespaceFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.Namespace, len(aa)) + strMap := make(map[string]*types.Namespace, len(aa)) + + for _, a := range aa { + idMap[a.ID] = a + strMap[a.Slug] = a + + } + + var aux *types.Namespace + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource page +// // // // // // // // // // // // // // // // // // // // // // // // // + +// preparePage prepares the resources of the given type for encoding +func (e StoreEncoder) preparePage(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.Page, len(nn)) + err = e.matchupPages(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call preparePage with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.Page) + if !ok { + panic("unexpected resource type: node expecting type of page") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setPageDefaults(res) + if err != nil { + return err + } + + err = e.validatePage(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodePages encodes a set of resource into the database +func (e StoreEncoder) encodePages(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodePage(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodePage encodes the resource into the database +func (e StoreEncoder) encodePage(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertComposePage(ctx, s, n.Resource.(*types.Page)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupPages returns an index with indicates what resources already exist +func (e StoreEncoder) matchupPages(ctx context.Context, s store.Storer, uu map[int]types.Page, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. + // Most resources won't really be that vast so this should be acceptable for now. 
+ aa, _, err := store.SearchComposePages(ctx, s, types.PageFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.Page, len(aa)) + strMap := make(map[string]*types.Page, len(aa)) + + for _, a := range aa { + strMap[a.Handle] = a + idMap[a.ID] = a + + } + + var aux *types.Page + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Utility functions +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e *StoreEncoder) grabStorer(p envoyx.EncodeParams) (s store.Storer, err error) { + auxs, ok := p.Params["storer"] + if !ok { + err = fmt.Errorf("storer not defined") + return + } + + s, ok = auxs.(store.Storer) + if !ok { + err = fmt.Errorf("invalid storer provided") + return + } + + return +} diff --git a/server/compose/envoy/store_encode.go b/server/compose/envoy/store_encode.go new file mode 100644 index 000000000..0fcd494fc --- /dev/null +++ b/server/compose/envoy/store_encode.go @@ -0,0 +1,45 @@ +package envoy + +import ( + "github.com/cortezaproject/corteza/server/compose/types" +) + +func (e StoreEncoder) setChartDefaults(res *types.Chart) (err error) { + return +} + +func (e StoreEncoder) validateChart(*types.Chart) (err error) { + return +} + +func (e StoreEncoder) setModuleDefaults(res *types.Module) (err error) { + return +} + +func (e StoreEncoder) validateModule(*types.Module) (err error) { + return +} + +func (e StoreEncoder) setModuleFieldDefaults(res *types.ModuleField) (err error) { + return +} + +func (e StoreEncoder) validateModuleField(*types.ModuleField) (err error) { + return +} + +func (e 
StoreEncoder) setNamespaceDefaults(res *types.Namespace) (err error) { + return +} + +func (e StoreEncoder) validateNamespace(*types.Namespace) (err error) { + return +} + +func (e StoreEncoder) setPageDefaults(res *types.Page) (err error) { + return +} + +func (e StoreEncoder) validatePage(*types.Page) (err error) { + return +} diff --git a/server/compose/envoy/yaml_decode.gen.go b/server/compose/envoy/yaml_decode.gen.go new file mode 100644 index 000000000..24050e22c --- /dev/null +++ b/server/compose/envoy/yaml_decode.gen.go @@ -0,0 +1,1596 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import ( + "context" + "fmt" + "io" + "strings" + + "github.com/cortezaproject/corteza/server/compose/types" + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/pkg/rbac" + "github.com/cortezaproject/corteza/server/pkg/y7s" + systemTypes "github.com/cortezaproject/corteza/server/system/types" + "golang.org/x/text/language" + "gopkg.in/yaml.v3" +) + +type ( + // YamlDecoder is responsible for decoding YAML documents into Corteza resources + // which are then managed by envoy and imported via an encoder. + YamlDecoder struct{} + documentContext struct { + references map[string]string + } + auxYamlDoc struct { + nodes envoyx.NodeSet + } +) + +// Decode returns a set of envoy nodes based on the provided params +// +// YamlDecoder expects the DecodeParam of `stream` which conforms +// to the io.Reader interface. 
+func (d YamlDecoder) Decode(ctx context.Context, p envoyx.DecodeParams) (out envoyx.NodeSet, err error) { + // Get the reader + r, err := d.getReader(ctx, p) + if err != nil { + return + } + + // Offload decoding to the aux document + doc := &auxYamlDoc{} + err = yaml.NewDecoder(r).Decode(doc) + if err != nil { + return + } + + return doc.nodes, nil +} + +func (d *auxYamlDoc) UnmarshalYAML(n *yaml.Node) (err error) { + // Get the document context from the root level + dctx, err := d.getDocumentContext(n) + if err != nil { + return + } + + var aux envoyx.NodeSet + return y7s.EachMap(n, func(k, v *yaml.Node) error { + kv := strings.ToLower(k.Value) + + switch kv { + case "chart", "charts", "chrt": + if y7s.IsMapping(v) { + aux, err = d.unmarshalChartMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalChartSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "module", "modules", "mod": + if y7s.IsMapping(v) { + aux, err = d.unmarshalModuleMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalModuleSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "modulefield", "module_fields", "modulefields", "fields": + if y7s.IsMapping(v) { + aux, err = d.unmarshalModuleFieldMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalModuleFieldSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "namespace", "namespaces", "ns": + if y7s.IsMapping(v) { + aux, err = d.unmarshalNamespaceMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalNamespaceSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "page", "pages", "pg": + if y7s.IsMapping(v) { + aux, err = d.unmarshalPageMap(dctx, v) + d.nodes = append(d.nodes, aux...) 
+ return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalPageSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + // Access control nodes + case "allow": + aux, err = unmarshalAllowNode(v) + d.nodes = append(d.nodes, aux...) + if err != nil { + return err + } + + case "deny": + aux, err = unmarshalDenyNode(v) + d.nodes = append(d.nodes, aux...) + if err != nil { + return err + } + + // Resource translation nodes + case "locale", "translation", "translations", "i18n": + aux, err = unmarshalLocaleNode(v) + d.nodes = append(d.nodes, aux...) + if err != nil { + return err + } + + // Offload to custom handlers + default: + aux, err = d.unmarshalYAML(kv, v) + d.nodes = append(d.nodes, aux...) + if err != nil { + return err + } + } + return nil + }) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource chart +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalChartSeq unmarshals Chart when provided as a sequence node +func (d *auxYamlDoc) unmarshalChartSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalChartNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalChartMap unmarshals Chart when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalChartMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalChartNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) 
+ + return nil + }) + + return +} + +// unmarshalChartNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalChartNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.Chart + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. + ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Handle", &r.Handle) + ii = ii.Add(r.Handle) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "config": + + // Handle custom node decoder + // + // The decoder may update the passed resource with arbitrary values + // as well as provide additional references and identifiers for the node. 
+ var ( + auxRefs map[string]envoyx.Ref + auxIdents envoyx.Identifiers + ) + auxRefs, auxIdents, err = unmarshalChartConfigNode(r, n) + if err != nil { + return err + } + refs = envoyx.MergeRefs(refs, auxRefs) + ii = ii.Merge(auxIdents) + + break + + case "handle": + // Handle identifiers + err = y7s.DecodeScalar(n, "handle", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "namespaceid", "namespace", "namespace_id", "ns": + // Handle field alias + // + // @todo consider adding an is empty check before overwriting + err = y7s.DecodeScalar(n, "namespaceID", &r.NamespaceID) + if err != nil { + return err + } + // Handle references + err = y7s.DecodeScalar(n, "namespaceID", &auxNodeValue) + if err != nil { + return err + } + refs["NamespaceID"] = envoyx.Ref{ + ResourceType: "corteza::compose:namespace", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Handle global namespace reference which can be provided as the doc. context + // + // @todo this is a temporary solution and should be extended when the document + // context needs to be extended. + // Limit this only to the compose resource since that is the only scenario + // the previous implementation supports. 
+ if ref, ok := dctx.references["namespace"]; ok { + refs["NamespaceID"] = envoyx.Ref{ + ResourceType: types.NamespaceResourceType, + Identifiers: envoyx.MakeIdentifiers(ref), + } + } + + // Define the scope + // + // This resource is scoped to the first parent (generally the namespace) + // when talking about Compose resources (the only supported scenario at the moment). + scope = envoyx.Scope{ + ResourceType: refs["NamespaceID"].ResourceType, + Identifiers: refs["NamespaceID"].Identifiers, + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under chart such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["ChartID"] = envoyx.Ref{ + ResourceType: types.ChartResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) 
+ return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.ChartResourceType, + Identifiers: ii, + References: refs, + + Scope: scope, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["1"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource module +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalModuleSeq unmarshals Module when provided as a sequence node +func (d *auxYamlDoc) unmarshalModuleSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalModuleNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalModuleMap unmarshals Module when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. 
+// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalModuleMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalModuleNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalModuleNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalModuleNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.Module + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. 
+ ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Handle", &r.Handle) + ii = ii.Add(r.Handle) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "handle": + // Handle identifiers + err = y7s.DecodeScalar(n, "handle", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "namespaceid", "namespace", "namespace_id", "ns", "ns_id": + // Handle field alias + // + // @todo consider adding an is empty check before overwriting + err = y7s.DecodeScalar(n, "namespaceID", &r.NamespaceID) + if err != nil { + return err + } + // Handle references + err = y7s.DecodeScalar(n, "namespaceID", &auxNodeValue) + if err != nil { + return err + } + refs["NamespaceID"] = envoyx.Ref{ + ResourceType: "corteza::compose:namespace", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Handle global namespace reference which can be provided as the doc. 
context + // + // @todo this is a temporary solution and should be extended when the document + // context needs to be extended. + // Limit this only to the compose resource since that is the only scenario + // the previous implementation supports. + if ref, ok := dctx.references["namespace"]; ok { + refs["NamespaceID"] = envoyx.Ref{ + ResourceType: types.NamespaceResourceType, + Identifiers: envoyx.MakeIdentifiers(ref), + } + } + + // Define the scope + // + // This resource is scoped to the first parent (generally the namespace) + // when talking about Compose resources (the only supported scenario at the moment). + scope = envoyx.Scope{ + ResourceType: refs["NamespaceID"].ResourceType, + Identifiers: refs["NamespaceID"].Identifiers, + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under module such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. 
+ // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["ModuleID"] = envoyx.Ref{ + ResourceType: types.ModuleResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.ModuleResourceType, + Identifiers: ii, + References: refs, + + Scope: scope, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["1"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) 
+ + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource moduleField +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalModuleFieldSeq unmarshals ModuleField when provided as a sequence node +func (d *auxYamlDoc) unmarshalModuleFieldSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalModuleFieldNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalModuleFieldMap unmarshals ModuleField when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalModuleFieldMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalModuleFieldNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalModuleFieldNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalModuleFieldNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.ModuleField + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. 
+ // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. + ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Name", &r.Name) + ii = ii.Add(r.Name) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "moduleid": + // Handle references + err = y7s.DecodeScalar(n, "moduleID", &auxNodeValue) + if err != nil { + return err + } + refs["ModuleID"] = envoyx.Ref{ + ResourceType: "corteza::compose:module", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "name": + // Handle identifiers + err = y7s.DecodeScalar(n, "name", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Handle global namespace reference which can be provided as the doc. context + // + // @todo this is a temporary solution and should be extended when the document + // context needs to be extended. 
+ // Limit this only to the compose resource since that is the only scenario + // the previous implementation supports. + if ref, ok := dctx.references["namespace"]; ok { + refs["NamespaceID"] = envoyx.Ref{ + ResourceType: types.NamespaceResourceType, + Identifiers: envoyx.MakeIdentifiers(ref), + } + } + + // Define the scope + // + // This resource is scoped to the first parent (generally the namespace) + // when talking about Compose resources (the only supported scenario at the moment). + scope = envoyx.Scope{ + ResourceType: refs["NamespaceID"].ResourceType, + Identifiers: refs["NamespaceID"].Identifiers, + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under moduleField such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. 
+ // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["ModuleFieldID"] = envoyx.Ref{ + ResourceType: types.ModuleFieldResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.ModuleFieldResourceType, + Identifiers: ii, + References: refs, + + Scope: scope, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["2"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) 
+ + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource namespace +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalNamespaceSeq unmarshals Namespace when provided as a sequence node +func (d *auxYamlDoc) unmarshalNamespaceSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalNamespaceNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalNamespaceMap unmarshals Namespace when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalNamespaceMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalNamespaceNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalNamespaceNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalNamespaceNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.Namespace + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. 
+ // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. + ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Slug", &r.Slug) + ii = ii.Add(r.Slug) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "slug": + // Handle identifiers + err = y7s.DecodeScalar(n, "slug", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Handle global namespace reference which can be provided as the doc. context + // + // @todo this is a temporary solution and should be extended when the document + // context needs to be extended. + // Limit this only to the compose resource since that is the only scenario + // the previous implementation supports. 
+ if ref, ok := dctx.references["namespace"]; ok { + refs["NamespaceID"] = envoyx.Ref{ + ResourceType: types.NamespaceResourceType, + Identifiers: envoyx.MakeIdentifiers(ref), + } + } + + // Define the scope + // + // This resource is scoped with no parent resources so this resource is the + // root itself (generally the namespace -- the only currently supported scenario). + scope = envoyx.Scope{ + ResourceType: types.NamespaceResourceType, + Identifiers: ii, + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under namespace such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["NamespaceID"] = envoyx.Ref{ + ResourceType: types.NamespaceResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) 
+ return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.NamespaceResourceType, + Identifiers: ii, + References: refs, + + Scope: scope, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["0"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource page +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalPageSeq unmarshals Page when provided as a sequence node +func (d *auxYamlDoc) unmarshalPageSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalPageNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalPageMap unmarshals Page when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. 
+// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalPageMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalPageNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalPageNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalPageNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.Page + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. 
+ ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Handle", &r.Handle) + ii = ii.Add(r.Handle) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "handle": + // Handle identifiers + err = y7s.DecodeScalar(n, "handle", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "moduleid": + // Handle references + err = y7s.DecodeScalar(n, "moduleID", &auxNodeValue) + if err != nil { + return err + } + refs["ModuleID"] = envoyx.Ref{ + ResourceType: "corteza::compose:module", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "namespaceid": + // Handle references + err = y7s.DecodeScalar(n, "namespaceID", &auxNodeValue) + if err != nil { + return err + } + refs["NamespaceID"] = envoyx.Ref{ + ResourceType: "corteza::compose:namespace", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "selfid": + // Handle references + err = y7s.DecodeScalar(n, "selfID", &auxNodeValue) + if err != nil { + return err + } + refs["SelfID"] = envoyx.Ref{ + ResourceType: "corteza::compose:page", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) 
+ auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Handle global namespace reference which can be provided as the doc. context + // + // @todo this is a temporary solution and should be extended when the document + // context needs to be extended. + // Limit this only to the compose resource since that is the only scenario + // the previous implementation supports. + if ref, ok := dctx.references["namespace"]; ok { + refs["NamespaceID"] = envoyx.Ref{ + ResourceType: types.NamespaceResourceType, + Identifiers: envoyx.MakeIdentifiers(ref), + } + } + + // Define the scope + // + // This resource is scoped to the first parent (generally the namespace) + // when talking about Compose resources (the only supported scenario at the moment). + scope = envoyx.Scope{ + ResourceType: refs["NamespaceID"].ResourceType, + Identifiers: refs["NamespaceID"].Identifiers, + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under page such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. 
+ // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["PageID"] = envoyx.Ref{ + ResourceType: types.PageResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.PageResourceType, + Identifiers: ii, + References: refs, + + Scope: scope, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["1"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) 
+ + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// RBAC unmarshal logic +// // // // // // // // // // // // // // // // // // // // // // // // // + +func unmarshalAllowNode(n *yaml.Node) (out envoyx.NodeSet, err error) { + return unmarshalRBACNode(n, rbac.Allow) +} + +func unmarshalDenyNode(n *yaml.Node) (out envoyx.NodeSet, err error) { + return unmarshalRBACNode(n, rbac.Deny) +} + +func unmarshalRBACNode(n *yaml.Node, acc rbac.Access) (out envoyx.NodeSet, err error) { + if y7s.IsMapping(n.Content[1]) { + return unmarshalNestedRBACNode(n, acc) + } + + return unmarshalFlatRBACNode(n, acc) +} + +// unmarshalNestedRBACNode handles RBAC rules when they are nested inside a resource +// +// The edge-case exists since the node doesn't explicitly specify the resource +// it belongs to. +// +// Example: +// +// modules: +// module1: +// name: "module 1" +// fields: ... +// allow: +// role1: +// - read +// - delete +func unmarshalNestedRBACNode(n *yaml.Node, acc rbac.Access) (out envoyx.NodeSet, err error) { + // Handles role + return out, y7s.EachMap(n, func(role, perm *yaml.Node) error { + // Handles operation + return y7s.EachMap(perm, func(res, op *yaml.Node) error { + out = append(out, &envoyx.Node{ + Resource: &rbac.Rule{ + Resource: res.Value, + Operation: op.Value, + Access: acc, + }, + ResourceType: rbac.RuleResourceType, + References: envoyx.MergeRefs( + map[string]envoyx.Ref{"RoleID": { + // Providing resource type as plain text to reduce cross component references + ResourceType: "corteza::system:role", + Identifiers: envoyx.MakeIdentifiers(role.Value), + }}, + envoyx.SplitResourceIdentifier(res.Value), + ), + }) + return nil + }) + }) +} + +// unmarshalFlatRBACNode handles RBAC rules when they are provided on the root level +// +// Example: +// +// allow: +// role1: +// corteza::system/: +// - users.search +// - users.create +func unmarshalFlatRBACNode(n *yaml.Node, acc rbac.Access) (out envoyx.NodeSet, err error) 
{ + return out, y7s.EachMap(n, func(role, op *yaml.Node) error { + out = append(out, &envoyx.Node{ + Resource: &rbac.Rule{ + Operation: op.Value, + Access: acc, + }, + ResourceType: rbac.RuleResourceType, + References: map[string]envoyx.Ref{ + "RoleID": { + // Providing resource type as plain text to reduce cross component references + ResourceType: "corteza::system:role", + Identifiers: envoyx.MakeIdentifiers(role.Value), + }, + }, + }) + return nil + }) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// i18n unmarshal logic +// // // // // // // // // // // // // // // // // // // // // // // // // + +func unmarshalLocaleNode(n *yaml.Node) (out envoyx.NodeSet, err error) { + return out, y7s.EachMap(n, func(lang, loc *yaml.Node) error { + langTag := systemTypes.Lang{Tag: language.Make(lang.Value)} + + return y7s.EachMap(loc, func(res, kv *yaml.Node) error { + return y7s.EachMap(kv, func(k, msg *yaml.Node) error { + out = append(out, &envoyx.Node{ + Resource: &systemTypes.ResourceTranslation{ + Lang: langTag, + K: k.Value, + Message: msg.Value, + }, + // Providing resource type as plain text to reduce cross component references + ResourceType: "corteza::system:resource-translation", + References: envoyx.SplitResourceIdentifier(res.Value), + }) + return nil + }) + }) + }) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Utilities +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d YamlDecoder) getReader(ctx context.Context, p envoyx.DecodeParams) (r io.Reader, err error) { + aux, ok := p.Params["stream"] + if ok { + r, ok = aux.(io.Reader) + if ok { + return + } + } + + // @todo consider adding support for managing files from a location + err = fmt.Errorf("YAML decoder expects a stream conforming to io.Reader interface") + return +} + +func (d *auxYamlDoc) getDocumentContext(n *yaml.Node) (dctx documentContext, err error) { + dctx = documentContext{ + references: 
make(map[string]string), + } + + err = y7s.EachMap(n, func(k, v *yaml.Node) error { + // @todo expand when needed. The previous implementation only supported + // namespaces on the root of the document. + + if y7s.IsKind(v, yaml.ScalarNode) { + dctx.references[k.Value] = v.Value + } + + return nil + }) + + return +} diff --git a/server/compose/envoy/yaml_decode.go b/server/compose/envoy/yaml_decode.go new file mode 100644 index 000000000..0ea431d19 --- /dev/null +++ b/server/compose/envoy/yaml_decode.go @@ -0,0 +1,66 @@ +package envoy + +import ( + "fmt" + "strings" + + "github.com/cortezaproject/corteza/server/compose/types" + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/pkg/y7s" + "gopkg.in/yaml.v3" +) + +func unmarshalChartConfigNode(r *types.Chart, n *yaml.Node) (refs map[string]envoyx.Ref, idents envoyx.Identifiers, err error) { + err = y7s.EachMap(n, func(k, v *yaml.Node) error { + if k.Value != "reports" { + return nil + } + + if y7s.IsSeq(v) { + var ( + auxRefs = make(map[string]envoyx.Ref) + auxIdents envoyx.Identifiers + i = -1 + ) + err = y7s.EachSeq(v, func(c *yaml.Node) error { + i++ + + auxRefs, auxIdents, err = unmarshalChartConfigReportNode(r, c, i) + refs = envoyx.MergeRefs(refs, auxRefs) + idents = idents.Merge(auxIdents) + return err + }) + if err != nil { + return err + } + } else { + refs, idents, err = unmarshalChartConfigReportNode(r, v, 0) + return err + } + return nil + }) + + return +} + +func unmarshalChartConfigReportNode(r *types.Chart, n *yaml.Node, index int) (refs map[string]envoyx.Ref, idents envoyx.Identifiers, err error) { + err = y7s.EachMap(n, func(k, v *yaml.Node) error { + switch strings.ToLower(k.Value) { + case "module", "mod", "moduleid", "module_id": + var auxi any + y7s.DecodeScalar(v, "moduleID", &auxi) + refs = map[string]envoyx.Ref{ + fmt.Sprintf("Config.Reports.%d.ModuleID", index): { + ResourceType: types.ModuleResourceType, + Identifiers: 
envoyx.MakeIdentifiers(auxi), + }, + } + } + return nil + }) + return +} + +func (d *auxYamlDoc) unmarshalYAML(k string, n *yaml.Node) (out envoyx.NodeSet, err error) { + return +} diff --git a/server/compose/envoy/yaml_encode.gen.go b/server/compose/envoy/yaml_encode.gen.go new file mode 100644 index 000000000..16761ae03 --- /dev/null +++ b/server/compose/envoy/yaml_encode.gen.go @@ -0,0 +1,525 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/cortezaproject/corteza/server/compose/types" + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/pkg/y7s" + "gopkg.in/yaml.v3" +) + +type ( + // YamlEncoder is responsible for encoding Corteza resources into + // a YAML supported format + YamlEncoder struct{} +) + +// Encode encodes the given Corteza resources into some YAML supported format +// +// Encoding should not do any additional processing apart from matching with +// dependencies and runtime validation +// +// Preparation runs validation, default value initialization, matching with +// already existing instances, ... +// +// The prepare function receives a set of nodes grouped by the resource type. +// This enables some batching optimization and simplifications when it comes to +// matching with existing resources. +// +// Prepare does not receive any placeholder nodes which are used solely +// for dependency resolution. 
+func (e YamlEncoder) Encode(ctx context.Context, p envoyx.EncodeParams, rt string, nodes envoyx.NodeSet, tt envoyx.Traverser) (err error) { + var ( + out *yaml.Node + aux *yaml.Node + ) + _ = aux + + w, err := e.getWriter(p) + if err != nil { + return + } + + switch rt { + case types.ChartResourceType: + aux, err = e.encodeCharts(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "chart", aux) + if err != nil { + return + } + case types.ModuleResourceType: + aux, err = e.encodeModules(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "module", aux) + if err != nil { + return + } + case types.ModuleFieldResourceType: + aux, err = e.encodeModuleFields(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "moduleField", aux) + if err != nil { + return + } + case types.NamespaceResourceType: + aux, err = e.encodeNamespaces(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "namespace", aux) + if err != nil { + return + } + case types.PageResourceType: + aux, err = e.encodePages(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "page", aux) + if err != nil { + return + } + + } + + return yaml.NewEncoder(w).Encode(out) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource chart +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeCharts(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeChart(ctx, p, n, tt) + if err != nil { + 
return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeChart focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeChart(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.Chart) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + + auxNamespaceID, err := e.encodeRef(p, res.NamespaceID, "NamespaceID", node, tt) + if err != nil { + return + } + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "config", res.Config, + "createdAt", auxCreatedAt, + "deletedAt", auxDeletedAt, + "handle", res.Handle, + "id", res.ID, + "name", res.Name, + "namespaceID", auxNamespaceID, + "updatedAt", auxUpdatedAt, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource module +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeModules(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeModule(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeModule focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeModule(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := 
node.Resource.(*types.Module) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + + auxNamespaceID, err := e.encodeRef(p, res.NamespaceID, "NamespaceID", node, tt) + if err != nil { + return + } + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "config", e.encodeModuleConfig(p, res.Config), + "createdAt", auxCreatedAt, + "deletedAt", auxDeletedAt, + "fields", res.Fields, + "handle", res.Handle, + "id", res.ID, + "meta", res.Meta, + "name", res.Name, + "namespaceID", auxNamespaceID, + "updatedAt", auxUpdatedAt, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource moduleField +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeModuleFields(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeModuleField(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeModuleField focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeModuleField(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.ModuleField) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + + auxDeletedAt, err := 
e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + + auxModuleID, err := e.encodeRef(p, res.ModuleID, "ModuleID", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "config", res.Config, + "createdAt", auxCreatedAt, + "defaultValue", res.DefaultValue, + "deletedAt", auxDeletedAt, + "expressions", res.Expressions, + "id", res.ID, + "kind", res.Kind, + "label", res.Label, + "moduleID", auxModuleID, + "multi", res.Multi, + "name", res.Name, + "options", res.Options, + "place", res.Place, + "required", res.Required, + "updatedAt", auxUpdatedAt, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource namespace +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeNamespaces(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeNamespace(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeNamespace focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeNamespace(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.Namespace) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return 
+ } + + out, err = y7s.AddMap(out, + "createdAt", auxCreatedAt, + "deletedAt", auxDeletedAt, + "enabled", res.Enabled, + "id", res.ID, + "meta", res.Meta, + "name", res.Name, + "slug", res.Slug, + "updatedAt", auxUpdatedAt, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource page +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodePages(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodePage(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodePage focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodePage(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.Page) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + + auxModuleID, err := e.encodeRef(p, res.ModuleID, "ModuleID", node, tt) + if err != nil { + return + } + auxNamespaceID, err := e.encodeRef(p, res.NamespaceID, "NamespaceID", node, tt) + if err != nil { + return + } + auxSelfID, err := e.encodeRef(p, res.SelfID, "SelfID", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "blocks", res.Blocks, + "children", res.Children, + "config", res.Config, + "createdAt", auxCreatedAt, + "deletedAt", auxDeletedAt, + 
"description", res.Description, + "handle", res.Handle, + "id", res.ID, + "moduleID", auxModuleID, + "namespaceID", auxNamespaceID, + "selfID", auxSelfID, + "title", res.Title, + "updatedAt", auxUpdatedAt, + "visible", res.Visible, + "weight", res.Weight, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Encoding utils +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeTimestamp(p envoyx.EncodeParams, t time.Time) (any, error) { + if t.IsZero() { + return nil, nil + } + + tz := p.Config.PreferredTimezone + if tz != "" { + tzL, err := time.LoadLocation(tz) + if err != nil { + return nil, err + } + t = t.In(tzL) + } + + ly := p.Config.PreferredTimeLayout + if ly == "" { + ly = time.RFC3339 + } + + return t.Format(ly), nil +} + +func (e YamlEncoder) encodeTimestampNil(p envoyx.EncodeParams, t *time.Time) (any, error) { + if t == nil { + return nil, nil + } + + // @todo timestamp encoding format + return e.encodeTimestamp(p, *t) +} + +func (e YamlEncoder) encodeRef(p envoyx.EncodeParams, id uint64, field string, node *envoyx.Node, tt envoyx.Traverser) (any, error) { + parent := tt.ParentForRef(node, node.References[field]) + + // @todo should we panic instead? 
+	// for now gracefully fallback to the ID
+	if parent == nil {
+		return id, nil
+	}
+
+	return parent.Identifiers.FriendlyIdentifier(), nil
+}
+
+// // // // // // // // // // // // // // // // // // // // // // // // //
+// Utility functions
+// // // // // // // // // // // // // // // // // // // // // // // // //
+
+func (e YamlEncoder) getWriter(p envoyx.EncodeParams) (out io.Writer, err error) {
+	aux, ok := p.Params["writer"]
+	if ok {
+		out, ok = aux.(io.Writer)
+		if ok {
+			return
+		}
+	}
+
+	// @todo consider adding support for managing files from a location
+	err = fmt.Errorf("YAML encoder expects a writer conforming to io.Writer interface")
+	return
+}
diff --git a/server/compose/envoy/yaml_encode.go b/server/compose/envoy/yaml_encode.go
new file mode 100644
index 000000000..a7a6b633f
--- /dev/null
+++ b/server/compose/envoy/yaml_encode.go
@@ -0,0 +1,13 @@
+package envoy
+
+import (
+	"github.com/cortezaproject/corteza/server/compose/types"
+	"github.com/cortezaproject/corteza/server/pkg/envoyx"
+)
+
+func (e YamlEncoder) encodeModuleConfig(p envoyx.EncodeParams, cfg types.ModuleConfig) any {
+
+	// @todo...
+ + return nil +} diff --git a/server/compose/module.cue b/server/compose/module.cue index ee919ceef..8f962f91d 100644 --- a/server/compose/module.cue +++ b/server/compose/module.cue @@ -19,6 +19,12 @@ module: { goType: "uint64", storeIdent: "rel_namespace" dal: { type: "Ref", refModelResType: "corteza::compose:namespace" } + + envoy: { + yaml: { + identKeyAlias: ["namespace", "namespace_id", "ns", "ns_id"] + } + } } handle: schema.HandleField name: { @@ -36,6 +42,11 @@ module: { dal: { type: "JSON", defaultEmptyObject: true } omitSetter: true omitGetter: true + envoy: { + yaml: { + customEncoder: true + } + } } fields: { goType: "types.ModuleFieldSet", @@ -72,6 +83,19 @@ module: { byNilState: ["deleted"] } + envoy: { + scoped: true + yaml: { + supportMappedInput: true + mappedField: "Handle" + identKeyAlias: ["modules", "mod"] + } + store: { + extendedFilterBuilder: true + extendedDecoder: true + } + } + rbac: { operations: { "read": {} diff --git a/server/compose/module_field.cue b/server/compose/module_field.cue index 6befe41f4..efac0c7f1 100644 --- a/server/compose/module_field.cue +++ b/server/compose/module_field.cue @@ -13,7 +13,11 @@ moduleField: { model: { ident: "compose_module_field" attributes: { - id: schema.IdField + id: schema.IdField & { + envoy: { + identifier: true + } + } module_id: { ident: "moduleID", goType: "uint64", @@ -39,6 +43,10 @@ moduleField: { name: { sortable: true dal: {} + } & { + envoy: { + identifier: true + } } label: { sortable: true @@ -104,6 +112,19 @@ moduleField: { checkFn: false } + envoy: { + scoped: true + yaml: { + supportMappedInput: true + mappedField: "Name" + identKeyAlias: ["module_fields", "modulefields", "fields"] + } + store: { + handleField: "" + customFilterBuilder: true + } + } + rbac: { operations: { "record.value.read": description: "Read field value on records" diff --git a/server/compose/namespace.cue b/server/compose/namespace.cue index 462ec1987..02def2d11 100644 --- a/server/compose/namespace.cue +++ 
b/server/compose/namespace.cue @@ -13,6 +13,9 @@ namespace: { sortable: true, goType: "string" dal: {} + envoy: { + identifier: true + } } enabled: { goType: "bool" @@ -45,7 +48,7 @@ namespace: { filter: { struct: { - namespace_id: { goType: "[]uint64", ident: "namespaceID" } + namespace_id: { goType: "[]uint64", ident: "namespaceID", storeIdent: "id" } slug: { goType: "string" } name: { goType: "string" } deleted: { goType: "filter.State", storeIdent: "deleted_at" } @@ -56,6 +59,19 @@ namespace: { byNilState: ["deleted"] } + envoy: { + scoped: true + yaml: { + supportMappedInput: true + mappedField: "Slug" + identKeyAlias: ["namespaces", "ns"] + } + store: { + handleField: "Slug" + extendedFilterBuilder: true + } + } + rbac: { operations: { "read": {} diff --git a/server/compose/page.cue b/server/compose/page.cue index 0849b2cdb..6f37a6fb7 100644 --- a/server/compose/page.cue +++ b/server/compose/page.cue @@ -24,6 +24,11 @@ page: { goType: "uint64", dal: { type: "Ref", refModelResType: "corteza::compose:page" } sortable: true + envoy: { + store: { + filterRefField: "ParentID" + } + } } module_id: { ident: "moduleID", @@ -87,6 +92,7 @@ page: { filter: { struct: { + page_id: { goType: "uint64", ident: "pageID", storeIdent: "id" } namespace_id: { goType: "uint64", ident: "namespaceID", storeIdent: "rel_namespace" } parent_id: { goType: "uint64", ident: "parentID" } module_id: { goType: "uint64", ident: "moduleID", storeIdent: "rel_module" } @@ -97,10 +103,22 @@ page: { } query: ["handle", "title", "description"] - byValue: ["handle", "namespace_id", "module_id"] + byValue: ["page_id", "handle", "namespace_id", "module_id"] byNilState: ["deleted"] } + envoy: { + scoped: true + yaml: { + supportMappedInput: true + mappedField: "Handle" + identKeyAlias: ["pages", "pg"] + } + store: { + extendedFilterBuilder: true + } + } + rbac: { operations: { "read": {} diff --git a/server/compose/record.cue b/server/compose/record.cue index 456b6f505..47b034060 100644 --- 
a/server/compose/record.cue +++ b/server/compose/record.cue @@ -66,6 +66,11 @@ record: { } } + // @todo tmp + envoy: { + omit: true + } + defaultGetter: true defaultSetter: true diff --git a/server/compose/record_revision.cue b/server/compose/record_revision.cue index 9fbb2ad45..567eaff22 100644 --- a/server/compose/record_revision.cue +++ b/server/compose/record_revision.cue @@ -38,4 +38,8 @@ record_revision: { "primary": { attribute: "id" } } } + + envoy: { + omit: true + } } diff --git a/server/compose/types/page.go b/server/compose/types/page.go index 84073e46f..78a833d1f 100644 --- a/server/compose/types/page.go +++ b/server/compose/types/page.go @@ -143,13 +143,14 @@ type ( } PageFilter struct { - NamespaceID uint64 `json:"namespaceID,string"` - ParentID uint64 `json:"parentID,string,omitempty"` - ModuleID uint64 `json:"moduleID,string,omitempty"` - Root bool `json:"root,omitempty"` - Handle string `json:"handle"` - Title string `json:"title"` - Query string `json:"query"` + PageID []uint64 `json:"pageID,string"` + NamespaceID uint64 `json:"namespaceID,string"` + ParentID uint64 `json:"parentID,string,omitempty"` + ModuleID uint64 `json:"moduleID,string,omitempty"` + Root bool `json:"root,omitempty"` + Handle string `json:"handle"` + Title string `json:"title"` + Query string `json:"query"` LabeledIDs []uint64 `json:"-"` Labels map[string]string `json:"labels,omitempty"` diff --git a/server/federation/module_exposed.cue b/server/federation/module_exposed.cue index 497daeb22..0d3883ead 100644 --- a/server/federation/module_exposed.cue +++ b/server/federation/module_exposed.cue @@ -71,6 +71,9 @@ exposedModule: { byValue: ["compose_module_id", "compose_namespace_id", "node_id"] } + envoy: { + omit: true + } rbac: { operations: { diff --git a/server/federation/module_mapping.cue b/server/federation/module_mapping.cue index 714f0ebfb..0c886c5ab 100644 --- a/server/federation/module_mapping.cue +++ b/server/federation/module_mapping.cue @@ -54,6 +54,10 @@ 
moduleMapping: { } } + envoy: { + omit: true + } + filter: { struct: { compose_module_id: { goType: "uint64", ident: "composeModuleID", storeIdent: "rel_compose_module" } diff --git a/server/federation/node.cue b/server/federation/node.cue index de93010eb..7171b7e16 100644 --- a/server/federation/node.cue +++ b/server/federation/node.cue @@ -57,6 +57,10 @@ node: { } } + envoy: { + omit: true + } + filter: { struct: { name: { goType: "string" } diff --git a/server/federation/node_sync.cue b/server/federation/node_sync.cue index 964310208..4d62d7ee6 100644 --- a/server/federation/node_sync.cue +++ b/server/federation/node_sync.cue @@ -53,6 +53,10 @@ nodeSync: { byValue: ["node_id", "module_id", "sync_status", "sync_type"] } + envoy: { + omit: true + } + store: { ident: "federationNodeSync" diff --git a/server/federation/shared_module.cue b/server/federation/shared_module.cue index fec5cbf68..b2ac664bb 100644 --- a/server/federation/shared_module.cue +++ b/server/federation/shared_module.cue @@ -56,6 +56,10 @@ sharedModule: { } } + envoy: { + omit: true + } + filter: { struct: { node_id: { goType: "uint64", ident: "nodeID", storeIdent: "rel_node" } diff --git a/server/pkg/envoyx/node.go b/server/pkg/envoyx/node.go index b167a64c7..563521113 100644 --- a/server/pkg/envoyx/node.go +++ b/server/pkg/envoyx/node.go @@ -222,6 +222,16 @@ func (ii Identifiers) Add(vv ...any) (out Identifiers) { return ii } +func (ii Identifiers) IdentsAsStrings() (ids, rest []string) { + aux, rest := ii.Idents() + + for _, a := range aux { + ids = append(ids, strconv.FormatUint(a, 10)) + } + + return +} + // Idents returns a slice of numeric and text identifiers func (ii Identifiers) Idents() (ints []uint64, rest []string) { var aux uint64 diff --git a/server/pkg/envoyx/utils.gen.go b/server/pkg/envoyx/utils.gen.go new file mode 100644 index 000000000..f66e903d3 --- /dev/null +++ b/server/pkg/envoyx/utils.gen.go @@ -0,0 +1,36 @@ +package envoyx + +// This file is auto-generated. 
+// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import () + +var ( + // needyResources is a list of resources that require a parent resource + // + // This list is primarily used when figuring out what nodes the dep. graph + // should return when traversing. + needyResources = map[string]bool{ + + "corteza::compose:chart": true, + "corteza::compose:module": true, + "corteza::compose:module-field": true, + + "corteza::compose:page": true, + "corteza::compose:record": true, + + "corteza::federation:exposed-module": true, + "corteza::federation:module-mapping": true, + + "corteza::federation:shared-module": true, + } + + // superNeedyResources is the second level of filtering in case the first + // pass removes everything + superNeedyResources = map[string]bool{ + "corteza::compose:module-field": true, + } +) diff --git a/server/store/adapters/rdbms/filters.gen.go b/server/store/adapters/rdbms/filters.gen.go index b2b935a4d..a48addd29 100644 --- a/server/store/adapters/rdbms/filters.gen.go +++ b/server/store/adapters/rdbms/filters.gen.go @@ -203,6 +203,10 @@ func ApigwFilterFilter(d drivers.Dialect, f systemType.ApigwFilterFilter) (ee [] ee = append(ee, expr) } + if f.ApigwFilterID > 0 { + ee = append(ee, goqu.C("id").Eq(f.ApigwFilterID)) + } + if f.RouteID > 0 { ee = append(ee, goqu.C("rel_route").Eq(f.RouteID)) } @@ -227,6 +231,10 @@ func ApigwRouteFilter(d drivers.Dialect, f systemType.ApigwRouteFilter) (ee []go ee = append(ee, expr) } + if len(f.ApigwrouteID) > 0 { + ee = append(ee, goqu.C("id").In(f.ApigwrouteID)) + } + if val := strings.TrimSpace(f.Route); len(val) > 0 { ee = append(ee, goqu.C("id").Eq(f.Route)) } @@ -456,6 +464,10 @@ func AutomationWorkflowFilter(d drivers.Dialect, f automationType.WorkflowFilter ee = append(ee, goqu.C("id").In(ss)) } + if val := strings.TrimSpace(f.Handle); len(val) > 0 { + ee = append(ee, goqu.C("handle").Eq(f.Handle)) + } + if len(f.LabeledIDs) > 0 { ee = 
append(ee, goqu.I("id").In(f.LabeledIDs)) } @@ -601,7 +613,7 @@ func ComposeNamespaceFilter(d drivers.Dialect, f composeType.NamespaceFilter) (e } if len(f.NamespaceID) > 0 { - ee = append(ee, goqu.C("namespace_id").In(f.NamespaceID)) + ee = append(ee, goqu.C("id").In(f.NamespaceID)) } if val := strings.TrimSpace(f.Name); len(val) > 0 { @@ -639,6 +651,10 @@ func ComposePageFilter(d drivers.Dialect, f composeType.PageFilter) (ee []goqu.E ee = append(ee, expr) } + if f.PageID > 0 { + ee = append(ee, goqu.C("id").Eq(f.PageID)) + } + if val := strings.TrimSpace(f.Handle); len(val) > 0 { ee = append(ee, goqu.C("handle").Eq(f.Handle)) } @@ -739,6 +755,10 @@ func DalSensitivityLevelFilter(d drivers.Dialect, f systemType.DalSensitivityLev ee = append(ee, goqu.C("id").In(f.SensitivityLevelID)) } + if val := strings.TrimSpace(f.Handle); len(val) > 0 { + ee = append(ee, goqu.C("handle").Eq(f.Handle)) + } + return ee, f, err } @@ -982,6 +1002,10 @@ func QueueFilter(d drivers.Dialect, f systemType.QueueFilter) (ee []goqu.Express ee = append(ee, expr) } + if f.QueueID > 0 { + ee = append(ee, goqu.C("id").Eq(f.QueueID)) + } + if f.Query != "" { ee = append(ee, goqu.Or( goqu.C("queue").ILike("%"+f.Query+"%"), diff --git a/server/system/apigw_filter.cue b/server/system/apigw_filter.cue index e4c7ded5a..c4af838d9 100644 --- a/server/system/apigw_filter.cue +++ b/server/system/apigw_filter.cue @@ -15,6 +15,11 @@ apigw_filter: { route: { sortable: true, goType: "uint64", storeIdent: "rel_route" dal: { type: "Ref", refModelResType: "corteza::system:apigw-route" } + envoy: { + store: { + omitRefFilter: true + } + } } weight: { sortable: true, @@ -53,14 +58,24 @@ apigw_filter: { } } + envoy: { + yaml: { + supportMappedInput: false + } + store: { + handleField: "" + } + } + filter: { struct: { + apigw_filter_id: {goType: "uint64", ident: "apigwFilterID", storeIdent: "id"} route_id: {goType: "uint64", ident: "routeID", storeIdent: "rel_route"} deleted: {goType: "filter.State", storeIdent: 
"deleted_at"} disabled: {goType: "filter.State", storeIdent: "enabled"} } - byValue: ["route_id"] + byValue: ["apigw_filter_id", "route_id"] byNilState: ["deleted"] byFalseState: ["disabled"] } diff --git a/server/system/apigw_route.cue b/server/system/apigw_route.cue index 372c57887..245ec05e5 100644 --- a/server/system/apigw_route.cue +++ b/server/system/apigw_route.cue @@ -40,6 +40,11 @@ apigw_route: { // @todo what does this do? refModelResType: "corteza::system:apigw-group" } + envoy: { + store: { + omitRefFilter: true + } + } } created_at: schema.SortableTimestampNowField @@ -55,8 +60,20 @@ apigw_route: { } } + envoy: { + yaml: { + supportMappedInput: true + mappedField: "Endpoint" + identKeyAlias: ["endpoints"] + } + store: { + handleField: "Endpoint" + } + } + filter: { struct: { + apigw_route_id: { goType: "[]uint64", ident: "apigwrouteID", storeIdent: "id" } route: {goType: "string", storeIdent: "id"} endpoint: {goType: "string"} method: {goType: "string"} @@ -65,7 +82,7 @@ apigw_route: { disabled: {goType: "filter.State", storeIdent: "enabled"} } - byValue: ["route", "method"] + byValue: ["apigw_route_id", "route", "method"] byNilState: ["deleted"] byFalseState: ["disabled"] } diff --git a/server/system/application.cue b/server/system/application.cue index cb6e20e79..97a0c7ea1 100644 --- a/server/system/application.cue +++ b/server/system/application.cue @@ -32,6 +32,11 @@ application: { schema.AttributeUserRef, storeIdent: "rel_owner", ident: "ownerID" + envoy: { + store: { + omitRefFilter: true + } + } } created_at: schema.SortableTimestampNowField updated_at: schema.SortableTimestampNilField @@ -62,6 +67,18 @@ application: { flags: true } + envoy: { + yaml: { + supportMappedInput: true + mappedField: "Name" + identKeyAlias: ["apps"] + } + store: { + extendedRefDecoder: true + handleField: "Name" + } + } + rbac: { operations: { read: diff --git a/server/system/attachment.cue b/server/system/attachment.cue index a4b1d6007..af819815e 100644 --- 
a/server/system/attachment.cue +++ b/server/system/attachment.cue @@ -47,6 +47,10 @@ attachment: { } } + envoy: { + omit: true + } + filter: { struct: { kind: {} diff --git a/server/system/auth_client.cue b/server/system/auth_client.cue index 3f3e85793..e03a1c5ac 100644 --- a/server/system/auth_client.cue +++ b/server/system/auth_client.cue @@ -6,14 +6,14 @@ import ( auth_client: { model: { - omitGetterSetter: true - attributes: { id: schema.IdField handle: schema.HandleField meta: { goType: "*types.AuthClientMeta" dal: { type: "JSON", defaultEmptyObject: true } + omitSetter: true + omitGetter: true } secret: { goType: "string" @@ -47,6 +47,8 @@ auth_client: { security: { goType: "*types.AuthClientSecurity" dal: { type: "JSON", defaultEmptyObject: true } + omitSetter: true + omitGetter: true } owned_by: schema.AttributeUserRef created_at: schema.SortableTimestampNowField @@ -83,6 +85,16 @@ auth_client: { user_id: {goType: "uint64"} } + envoy: { + scoped: true + yaml: { + supportMappedInput: true + mappedField: "Handle" + identKeyAlias: ["authclients"] + } + store: {} + } + rbac: { operations: { read: description: "Read authorization client" diff --git a/server/system/auth_confirmed_client.cue b/server/system/auth_confirmed_client.cue index f7985cdfb..286a6ad35 100644 --- a/server/system/auth_confirmed_client.cue +++ b/server/system/auth_confirmed_client.cue @@ -44,6 +44,10 @@ auth_confirmed_client: { byValue: ["user_id"] } + envoy: { + omit: true + } + store: { api: { diff --git a/server/system/auth_oa2token.cue b/server/system/auth_oa2token.cue index dca963196..b3bed4fa1 100644 --- a/server/system/auth_oa2token.cue +++ b/server/system/auth_oa2token.cue @@ -68,6 +68,10 @@ auth_oa2token: { byValue: ["user_id"] } + envoy: { + omit: true + } + store: { api: { lookups: [ diff --git a/server/system/auth_session.cue b/server/system/auth_session.cue index 44b0c5aba..c53eb13fe 100644 --- a/server/system/auth_session.cue +++ b/server/system/auth_session.cue @@ -55,6 +55,10 
@@ auth_session: { byValue: ["user_id"] } + envoy: { + omit: true + } + store: { api: { lookups: [ diff --git a/server/system/credential.cue b/server/system/credential.cue index 43810a589..b052dca4c 100644 --- a/server/system/credential.cue +++ b/server/system/credential.cue @@ -60,6 +60,10 @@ credential: { checkFn: false } + envoy: { + omit: true + } + store: { api: { lookups: [ diff --git a/server/system/dal_connection.cue b/server/system/dal_connection.cue index 4e105be9d..67c7ca611 100644 --- a/server/system/dal_connection.cue +++ b/server/system/dal_connection.cue @@ -42,14 +42,14 @@ dal_connection: { filter: { struct: { - connection_id: {goType: "[]uint64", ident: "connectionID", storeIdent: "id"} + dal_connection_id: {goType: "[]uint64", ident: "connectionID", storeIdent: "id"} handle: {goType: "string"} type: {goType: "string"} deleted: {goType: "filter.State", storeIdent: "deleted_at"} } - byValue: ["connection_id", "handle", "type"] + byValue: ["dal_connection_id", "handle", "type"] byNilState: ["deleted"] } @@ -57,6 +57,15 @@ dal_connection: { labels: false } + envoy: { + yaml: { + supportMappedInput: true + mappedField: "Handle" + identKeyAlias: ["connection", "connections"] + } + store: {} + } + rbac: { operations: { "read": description: "Read connection" diff --git a/server/system/dal_sensitivity_level.cue b/server/system/dal_sensitivity_level.cue index eab6521d5..4ca1be6d6 100644 --- a/server/system/dal_sensitivity_level.cue +++ b/server/system/dal_sensitivity_level.cue @@ -36,15 +36,25 @@ dal_sensitivity_level: { filter: { struct: { - sensitivity_level_id: {goType: "[]uint64", ident: "sensitivityLevelID", storeIdent: "id"} + dal_sensitivity_level_id: {goType: "[]uint64", ident: "sensitivityLevelID", storeIdent: "id"} + handle: { goType: "string" } deleted: {goType: "filter.State", storeIdent: "deleted_at"} } - byValue: ["sensitivity_level_id"] + byValue: ["dal_sensitivity_level_id", "handle"] byNilState: ["deleted"] } + envoy: { + yaml: { + 
supportMappedInput: true + mappedField: "Handle" + identKeyAlias: ["sensitivity_level"] + } + store: {} + } + features: { labels: false } diff --git a/server/system/data_privacy_request.cue b/server/system/data_privacy_request.cue index 082555a2f..738b4cdd0 100644 --- a/server/system/data_privacy_request.cue +++ b/server/system/data_privacy_request.cue @@ -77,6 +77,10 @@ data_privacy_request: { } } + envoy: { + omit: true + } + store: { api: { lookups: [ diff --git a/server/system/data_privacy_request_comment.cue b/server/system/data_privacy_request_comment.cue index 1da7d6155..d5c707c0c 100644 --- a/server/system/data_privacy_request_comment.cue +++ b/server/system/data_privacy_request_comment.cue @@ -45,6 +45,10 @@ data_privacy_request_comment: { byValue: ["request_id"] } + envoy: { + omit: true + } + store: { api: { functions: [] diff --git a/server/system/envoy/store_decode.gen.go b/server/system/envoy/store_decode.gen.go new file mode 100644 index 000000000..60085d030 --- /dev/null +++ b/server/system/envoy/store_decode.gen.go @@ -0,0 +1,1035 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import ( + "context" + "fmt" + + "github.com/cortezaproject/corteza/server/pkg/dal" + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/store" + "github.com/cortezaproject/corteza/server/system/types" +) + +type ( + // StoreDecoder is responsible for fetching already stored Corteza resources + // which are then managed by envoy and imported via an encoder. + StoreDecoder struct{} +) + +// Decode returns a set of envoy nodes based on the provided params +// +// StoreDecoder expects the DecodeParam of `storer` and `dal` which conform +// to the store.Storer and dal.FullService interfaces. 
+func (d StoreDecoder) Decode(ctx context.Context, p envoyx.DecodeParams) (out envoyx.NodeSet, err error) { + var ( + s store.Storer + dl dal.FullService + ) + + // @todo we can optionally not require them based on what we're doing + if auxS, ok := p.Params["storer"]; ok { + s = auxS.(store.Storer) + } + if auxDl, ok := p.Params["dal"]; ok { + dl = auxDl.(dal.FullService) + } + + return d.decode(ctx, s, dl, p) +} + +func (d StoreDecoder) decode(ctx context.Context, s store.Storer, dl dal.FullService, p envoyx.DecodeParams) (out envoyx.NodeSet, err error) { + // Transform passed filters into an ordered structure + type ( + filterWrap struct { + rt string + f envoyx.ResourceFilter + } + ) + wrappedFilters := make([]filterWrap, 0, len(p.Filter)) + for rt, f := range p.Filter { + wrappedFilters = append(wrappedFilters, filterWrap{rt: rt, f: f}) + } + + // Get all requested scopes + scopedNodes := make(envoyx.NodeSet, len(p.Filter)) + + // @note skipping scope logic since it's currently only supported within + // Compose resources. + + // Get all requested references + // + // Keep an index for the Node and one for the reference to make our + // lives easier. 
+ refNodes := make([]map[string]*envoyx.Node, len(p.Filter)) + refRefs := make([]map[string]envoyx.Ref, len(p.Filter)) + for i, a := range wrappedFilters { + if len(a.f.Refs) == 0 { + continue + } + + auxr := make(map[string]*envoyx.Node, len(a.f.Refs)) + auxa := make(map[string]envoyx.Ref) + for field, ref := range a.f.Refs { + f := ref.ResourceFilter() + aux, err := d.decode(ctx, s, dl, envoyx.DecodeParams{ + Type: envoyx.DecodeTypeStore, + Filter: f, + }) + if err != nil { + return nil, err + } + if len(aux) == 0 { + return nil, fmt.Errorf("invalid reference %v", ref) + } + if len(aux) > 1 { + return nil, fmt.Errorf("ambiguous reference: too many resources returned %v", a.f) + } + + auxr[field] = aux[0] + auxa[field] = aux[0].ToRef() + } + + refNodes[i] = auxr + refRefs[i] = auxa + } + + var aux envoyx.NodeSet + for i, wf := range wrappedFilters { + switch wf.rt { + case types.ApplicationResourceType: + aux, err = d.decodeApplication(ctx, s, dl, d.makeApplicationFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.ApigwRouteResourceType: + aux, err = d.decodeApigwRoute(ctx, s, dl, d.makeApigwRouteFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.ApigwFilterResourceType: + aux, err = d.decodeApigwFilter(ctx, s, dl, d.makeApigwFilterFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) 
+ + case types.AuthClientResourceType: + aux, err = d.decodeAuthClient(ctx, s, dl, d.makeAuthClientFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.QueueResourceType: + aux, err = d.decodeQueue(ctx, s, dl, d.makeQueueFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.ReportResourceType: + aux, err = d.decodeReport(ctx, s, dl, d.makeReportFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.RoleResourceType: + aux, err = d.decodeRole(ctx, s, dl, d.makeRoleFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.TemplateResourceType: + aux, err = d.decodeTemplate(ctx, s, dl, d.makeTemplateFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) 
+ + case types.UserResourceType: + aux, err = d.decodeUser(ctx, s, dl, d.makeUserFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.DalConnectionResourceType: + aux, err = d.decodeDalConnection(ctx, s, dl, d.makeDalConnectionFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + case types.DalSensitivityLevelResourceType: + aux, err = d.decodeDalSensitivityLevel(ctx, s, dl, d.makeDalSensitivityLevelFilter(scopedNodes[i], refNodes[i], wf.f)) + if err != nil { + return + } + for _, a := range aux { + a.Identifiers = a.Identifiers.Merge(wf.f.Identifiers) + a.References = envoyx.MergeRefs(a.References, refRefs[i]) + } + out = append(out, aux...) + + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource application +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeApplication(ctx context.Context, s store.Storer, dl dal.FullService, f types.ApplicationFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchApplications(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "OwnerID": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.OwnerID), + }, + } + + refs = envoyx.MergeRefs(refs, d.decodeApplicationRefs(r)) + + var scope envoyx.Scope + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.ApplicationResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeApplicationFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.ApplicationFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.ApplicationID = ids + + if len(hh) > 0 { + out.Name = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource apigwRoute +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeApigwRoute(ctx context.Context, s store.Storer, dl dal.FullService, f types.ApigwRouteFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchApigwRoutes(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "CreatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.CreatedBy), + }, + // Handle references + "DeletedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.DeletedBy), + }, + // Handle references + "Group": envoyx.Ref{ + ResourceType: "corteza::system:apigw-group", + Identifiers: envoyx.MakeIdentifiers(r.Group), + }, + // Handle references + "UpdatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.UpdatedBy), + }, + } + + var scope envoyx.Scope + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.ApigwRouteResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeApigwRouteFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.ApigwRouteFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.ApigwRouteID = ids + + if len(hh) > 0 { + out.Endpoint = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource apigwFilter +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeApigwFilter(ctx context.Context, s store.Storer, dl dal.FullService, f types.ApigwFilterFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchApigwFilters(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "CreatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.CreatedBy), + }, + // Handle references + "DeletedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.DeletedBy), + }, + // Handle references + "Route": envoyx.Ref{ + ResourceType: "corteza::system:apigw-route", + Identifiers: envoyx.MakeIdentifiers(r.Route), + }, + // Handle references + "UpdatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.UpdatedBy), + }, + } + + var scope envoyx.Scope + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.ApigwFilterResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeApigwFilterFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.ApigwFilterFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.ApigwFilterID = ids + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource authClient +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeAuthClient(ctx context.Context, s store.Storer, dl dal.FullService, f types.AuthClientFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchAuthClients(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.Handle, + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "CreatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.CreatedBy), + }, + // Handle references + "DeletedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.DeletedBy), + }, + // Handle references + "OwnedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.OwnedBy), + }, + // Handle references + "UpdatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.UpdatedBy), + }, + } + + var scope envoyx.Scope + + scope = envoyx.Scope{ + ResourceType: types.AuthClientResourceType, + Identifiers: ii, + } + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.AuthClientResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeAuthClientFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.AuthClientFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.AuthClientID = ids + + if len(hh) > 0 { + out.Handle = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource queue +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeQueue(ctx context.Context, s store.Storer, dl dal.FullService, f types.QueueFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchQueues(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.ID, + r.Queue, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "CreatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.CreatedBy), + }, + // Handle references + "DeletedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.DeletedBy), + }, + // Handle references + "UpdatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.UpdatedBy), + }, + } + + var scope envoyx.Scope + + scope = envoyx.Scope{ + ResourceType: types.QueueResourceType, + Identifiers: ii, + } + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.QueueResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeQueueFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.QueueFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.QueueID = ids + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource report +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeReport(ctx context.Context, s store.Storer, dl dal.FullService, f types.ReportFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchReports(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.Handle, + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "CreatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.CreatedBy), + }, + // Handle references + "DeletedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.DeletedBy), + }, + // Handle references + "OwnedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.OwnedBy), + }, + // Handle references + "UpdatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.UpdatedBy), + }, + } + + var scope envoyx.Scope + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.ReportResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeReportFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.ReportFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.ReportID = ids + + if len(hh) > 0 { + out.Handle = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource role +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeRole(ctx context.Context, s store.Storer, dl dal.FullService, f types.RoleFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchRoles(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.Handle, + r.ID, + ) + + refs := map[string]envoyx.Ref{} + + var scope envoyx.Scope + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.RoleResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeRoleFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.RoleFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.RoleID = ids + + if len(hh) > 0 { + out.Handle = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource template +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeTemplate(ctx context.Context, s store.Storer, dl dal.FullService, f types.TemplateFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchTemplates(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.Handle, + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "OwnerID": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.OwnerID), + }, + } + + var scope envoyx.Scope + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.TemplateResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeTemplateFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.TemplateFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.TemplateID = ids + + if len(hh) > 0 { + out.Handle = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource user +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeUser(ctx context.Context, s store.Storer, dl dal.FullService, f types.UserFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchUsers(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.Handle, + r.ID, + ) + + refs := map[string]envoyx.Ref{} + + var scope envoyx.Scope + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.UserResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeUserFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.UserFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.UserID = ids + + if len(hh) > 0 { + out.Handle = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource dalConnection +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeDalConnection(ctx context.Context, s store.Storer, dl dal.FullService, f types.DalConnectionFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchDalConnections(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.Handle, + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "CreatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.CreatedBy), + }, + // Handle references + "DeletedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.DeletedBy), + }, + // Handle references + "UpdatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.UpdatedBy), + }, + } + + var scope envoyx.Scope + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.DalConnectionResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeDalConnectionFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.DalConnectionFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.DalConnectionID = ids + + if len(hh) > 0 { + out.Handle = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource dalSensitivityLevel +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d StoreDecoder) decodeDalSensitivityLevel(ctx context.Context, s store.Storer, dl dal.FullService, f types.DalSensitivityLevelFilter) (out envoyx.NodeSet, err error) { + // @todo this might need to be improved. + // Currently, no resource is vast enough to pose a problem. 
+ rr, _, err := store.SearchDalSensitivityLevels(ctx, s, f) + if err != nil { + return + } + + for _, r := range rr { + // Identifiers + ii := envoyx.MakeIdentifiers( + r.Handle, + r.ID, + ) + + refs := map[string]envoyx.Ref{ + // Handle references + "CreatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.CreatedBy), + }, + // Handle references + "DeletedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.DeletedBy), + }, + // Handle references + "UpdatedBy": envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(r.UpdatedBy), + }, + } + + var scope envoyx.Scope + + out = append(out, &envoyx.Node{ + Resource: r, + + ResourceType: types.DalSensitivityLevelResourceType, + Identifiers: ii, + References: refs, + Scope: scope, + }) + } + + return +} + +func (d StoreDecoder) makeDalSensitivityLevelFilter(scope *envoyx.Node, refs map[string]*envoyx.Node, auxf envoyx.ResourceFilter) (out types.DalSensitivityLevelFilter) { + out.Limit = auxf.Limit + + ids, hh := auxf.Identifiers.Idents() + _ = ids + _ = hh + + out.DalSensitivityLevelID = ids + + if len(hh) > 0 { + out.Handle = hh[0] + } + + // Refs + var ( + ar *envoyx.Node + ok bool + ) + _ = ar + _ = ok + + return +} diff --git a/server/system/envoy/store_decode.go b/server/system/envoy/store_decode.go new file mode 100644 index 000000000..831ea4cf8 --- /dev/null +++ b/server/system/envoy/store_decode.go @@ -0,0 +1,12 @@ +package envoy + +import ( + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/system/types" +) + +func (d StoreDecoder) decodeApplicationRefs(c *types.Application) (refs map[string]envoyx.Ref) { + + // @todo + return +} diff --git a/server/system/envoy/store_encode.gen.go b/server/system/envoy/store_encode.gen.go new file mode 100644 index 000000000..cd4945edf --- /dev/null +++ b/server/system/envoy/store_encode.gen.go @@ -0,0 
+1,2131 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import ( + "context" + "fmt" + "strconv" + + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/pkg/id" + "github.com/cortezaproject/corteza/server/store" + "github.com/cortezaproject/corteza/server/system/types" +) + +type ( + // StoreEncoder is responsible for encoding Corteza resources into the + // database via the Storer or the DAL interface + // + // @todo consider having a different encoder for the DAL resources + StoreEncoder struct{} +) + +// Prepare performs some initial processing on the resource before it can be encoded +// +// Preparation runs validation, default value initialization, matching with +// already existing instances, ... +// +// The prepare function receives a set of nodes grouped by the resource type. +// This enables some batching optimization and simplifications when it comes to +// matching with existing resources. +// +// Prepare does not receive any placeholder nodes which are used solely +// for dependency resolution. 
+func (e StoreEncoder) Prepare(ctx context.Context, p envoyx.EncodeParams, rt string, nn envoyx.NodeSet) (err error) { + s, err := e.grabStorer(p) + if err != nil { + return + } + + switch rt { + case types.ApplicationResourceType: + return e.prepareApplication(ctx, p, s, nn) + case types.ApigwRouteResourceType: + return e.prepareApigwRoute(ctx, p, s, nn) + case types.ApigwFilterResourceType: + return e.prepareApigwFilter(ctx, p, s, nn) + case types.AuthClientResourceType: + return e.prepareAuthClient(ctx, p, s, nn) + + case types.QueueResourceType: + return e.prepareQueue(ctx, p, s, nn) + + case types.ReportResourceType: + return e.prepareReport(ctx, p, s, nn) + + case types.RoleResourceType: + return e.prepareRole(ctx, p, s, nn) + + case types.TemplateResourceType: + return e.prepareTemplate(ctx, p, s, nn) + case types.UserResourceType: + return e.prepareUser(ctx, p, s, nn) + case types.DalConnectionResourceType: + return e.prepareDalConnection(ctx, p, s, nn) + case types.DalSensitivityLevelResourceType: + return e.prepareDalSensitivityLevel(ctx, p, s, nn) + } + + return +} + +// Encode encodes the given Corteza resources into the primary store +// +// Encoding should not do any additional processing apart from matching with +// dependencies and runtime validation +// +// The Encode function is called for every resource type where the resource +// appears at the root of the dependency tree. +// All of the root-level resources for that resource type are passed into the function. +// The encoding function must traverse the branches to encode all of the dependencies. +// +// This flow is used to simplify the flow of how resources are encoded into YAML +// (and other documents) as well as to simplify batching. +// +// Encode does not receive any placeholder nodes which are used solely +// for dependency resolution. 
+func (e StoreEncoder) Encode(ctx context.Context, p envoyx.EncodeParams, rt string, nodes envoyx.NodeSet, tree envoyx.Traverser) (err error) { + s, err := e.grabStorer(p) + if err != nil { + return + } + + switch rt { + case types.ApplicationResourceType: + return e.encodeApplications(ctx, p, s, nodes, tree) + + case types.ApigwRouteResourceType: + return e.encodeApigwRoutes(ctx, p, s, nodes, tree) + + case types.ApigwFilterResourceType: + return e.encodeApigwFilters(ctx, p, s, nodes, tree) + + case types.AuthClientResourceType: + return e.encodeAuthClients(ctx, p, s, nodes, tree) + + case types.QueueResourceType: + return e.encodeQueues(ctx, p, s, nodes, tree) + + case types.ReportResourceType: + return e.encodeReports(ctx, p, s, nodes, tree) + + case types.RoleResourceType: + return e.encodeRoles(ctx, p, s, nodes, tree) + + case types.TemplateResourceType: + return e.encodeTemplates(ctx, p, s, nodes, tree) + + case types.UserResourceType: + return e.encodeUsers(ctx, p, s, nodes, tree) + + case types.DalConnectionResourceType: + return e.encodeDalConnections(ctx, p, s, nodes, tree) + + case types.DalSensitivityLevelResourceType: + return e.encodeDalSensitivityLevels(ctx, p, s, nodes, tree) + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource application +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareApplication prepares the resources of the given type for encoding +func (e StoreEncoder) prepareApplication(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. 
+ // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. + + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.Application, len(nn)) + err = e.matchupApplications(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareApplication with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.Application) + if !ok { + panic("unexpected resource type: node expecting type of application") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setApplicationDefaults(res) + if err != nil { + return err + } + + err = e.validateApplication(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeApplications encodes a set of resource into the database +func (e StoreEncoder) encodeApplications(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeApplication(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeApplication encodes the resource into the database +func (e StoreEncoder) encodeApplication(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertApplication(ctx, s, n.Resource.(*types.Application)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupApplications returns an index with indicates what resources already exist +func (e StoreEncoder) matchupApplications(ctx context.Context, s store.Storer, uu map[int]types.Application, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. 
+ // Most resources won't really be that vast so this should be acceptable for now. + aa, _, err := store.SearchApplications(ctx, s, types.ApplicationFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.Application, len(aa)) + strMap := make(map[string]*types.Application, len(aa)) + + for _, a := range aa { + idMap[a.ID] = a + + } + + var aux *types.Application + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource apigwRoute +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareApigwRoute prepares the resources of the given type for encoding +func (e StoreEncoder) prepareApigwRoute(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.ApigwRoute, len(nn)) + err = e.matchupApigwRoutes(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareApigwRoute with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.ApigwRoute) + if !ok { + panic("unexpected resource type: node expecting type of apigwRoute") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setApigwRouteDefaults(res) + if err != nil { + return err + } + + err = e.validateApigwRoute(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeApigwRoutes encodes a set of resource into the database +func (e StoreEncoder) encodeApigwRoutes(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeApigwRoute(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeApigwRoute encodes the resource into the database +func (e StoreEncoder) encodeApigwRoute(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertApigwRoute(ctx, s, n.Resource.(*types.ApigwRoute)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupApigwRoutes returns an index with indicates what resources already exist +func (e StoreEncoder) matchupApigwRoutes(ctx context.Context, s store.Storer, uu map[int]types.ApigwRoute, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. 
+ // Most resources won't really be that vast so this should be acceptable for now. + aa, _, err := store.SearchApigwRoutes(ctx, s, types.ApigwRouteFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.ApigwRoute, len(aa)) + strMap := make(map[string]*types.ApigwRoute, len(aa)) + + for _, a := range aa { + idMap[a.ID] = a + + } + + var aux *types.ApigwRoute + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource apigwFilter +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareApigwFilter prepares the resources of the given type for encoding +func (e StoreEncoder) prepareApigwFilter(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.ApigwFilter, len(nn)) + err = e.matchupApigwFilters(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareApigwFilter with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.ApigwFilter) + if !ok { + panic("unexpected resource type: node expecting type of apigwFilter") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setApigwFilterDefaults(res) + if err != nil { + return err + } + + err = e.validateApigwFilter(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeApigwFilters encodes a set of resource into the database +func (e StoreEncoder) encodeApigwFilters(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeApigwFilter(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeApigwFilter encodes the resource into the database +func (e StoreEncoder) encodeApigwFilter(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertApigwFilter(ctx, s, n.Resource.(*types.ApigwFilter)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupApigwFilters returns an index with indicates what resources already exist +func (e StoreEncoder) matchupApigwFilters(ctx context.Context, s store.Storer, uu map[int]types.ApigwFilter, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. 
+ // Most resources won't really be that vast so this should be acceptable for now. + aa, _, err := store.SearchApigwFilters(ctx, s, types.ApigwFilterFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.ApigwFilter, len(aa)) + strMap := make(map[string]*types.ApigwFilter, len(aa)) + + for _, a := range aa { + idMap[a.ID] = a + + } + + var aux *types.ApigwFilter + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource authClient +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareAuthClient prepares the resources of the given type for encoding +func (e StoreEncoder) prepareAuthClient(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.AuthClient, len(nn)) + err = e.matchupAuthClients(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareAuthClient with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.AuthClient) + if !ok { + panic("unexpected resource type: node expecting type of authClient") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setAuthClientDefaults(res) + if err != nil { + return err + } + + err = e.validateAuthClient(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeAuthClients encodes a set of resource into the database +func (e StoreEncoder) encodeAuthClients(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeAuthClient(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeAuthClient encodes the resource into the database +func (e StoreEncoder) encodeAuthClient(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertAuthClient(ctx, s, n.Resource.(*types.AuthClient)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupAuthClients returns an index with indicates what resources already exist +func (e StoreEncoder) matchupAuthClients(ctx context.Context, s store.Storer, uu map[int]types.AuthClient, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. 
+ // Most resources won't really be that vast so this should be acceptable for now. + aa, _, err := store.SearchAuthClients(ctx, s, types.AuthClientFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.AuthClient, len(aa)) + strMap := make(map[string]*types.AuthClient, len(aa)) + + for _, a := range aa { + strMap[a.Handle] = a + idMap[a.ID] = a + + } + + var aux *types.AuthClient + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource queue +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareQueue prepares the resources of the given type for encoding +func (e StoreEncoder) prepareQueue(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.Queue, len(nn)) + err = e.matchupQueues(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareQueue with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.Queue) + if !ok { + panic("unexpected resource type: node expecting type of queue") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setQueueDefaults(res) + if err != nil { + return err + } + + err = e.validateQueue(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeQueues encodes a set of resource into the database +func (e StoreEncoder) encodeQueues(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeQueue(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeQueue encodes the resource into the database +func (e StoreEncoder) encodeQueue(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertQueue(ctx, s, n.Resource.(*types.Queue)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupQueues returns an index with indicates what resources already exist +func (e StoreEncoder) matchupQueues(ctx context.Context, s store.Storer, uu map[int]types.Queue, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. + // Most resources won't really be that vast so this should be acceptable for now. 
+ aa, _, err := store.SearchQueues(ctx, s, types.QueueFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.Queue, len(aa)) + strMap := make(map[string]*types.Queue, len(aa)) + + for _, a := range aa { + idMap[a.ID] = a + strMap[a.Queue] = a + + } + + var aux *types.Queue + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource report +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareReport prepares the resources of the given type for encoding +func (e StoreEncoder) prepareReport(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.Report, len(nn)) + err = e.matchupReports(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareReport with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.Report) + if !ok { + panic("unexpected resource type: node expecting type of report") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setReportDefaults(res) + if err != nil { + return err + } + + err = e.validateReport(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeReports encodes a set of resource into the database +func (e StoreEncoder) encodeReports(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeReport(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeReport encodes the resource into the database +func (e StoreEncoder) encodeReport(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertReport(ctx, s, n.Resource.(*types.Report)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupReports returns an index with indicates what resources already exist +func (e StoreEncoder) matchupReports(ctx context.Context, s store.Storer, uu map[int]types.Report, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. + // Most resources won't really be that vast so this should be acceptable for now. 
+ aa, _, err := store.SearchReports(ctx, s, types.ReportFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.Report, len(aa)) + strMap := make(map[string]*types.Report, len(aa)) + + for _, a := range aa { + strMap[a.Handle] = a + idMap[a.ID] = a + + } + + var aux *types.Report + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource role +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareRole prepares the resources of the given type for encoding +func (e StoreEncoder) prepareRole(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.Role, len(nn)) + err = e.matchupRoles(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareRole with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.Role) + if !ok { + panic("unexpected resource type: node expecting type of role") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setRoleDefaults(res) + if err != nil { + return err + } + + err = e.validateRole(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeRoles encodes a set of resource into the database +func (e StoreEncoder) encodeRoles(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeRole(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeRole encodes the resource into the database +func (e StoreEncoder) encodeRole(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertRole(ctx, s, n.Resource.(*types.Role)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupRoles returns an index with indicates what resources already exist +func (e StoreEncoder) matchupRoles(ctx context.Context, s store.Storer, uu map[int]types.Role, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. + // Most resources won't really be that vast so this should be acceptable for now. 
+ aa, _, err := store.SearchRoles(ctx, s, types.RoleFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.Role, len(aa)) + strMap := make(map[string]*types.Role, len(aa)) + + for _, a := range aa { + strMap[a.Handle] = a + idMap[a.ID] = a + + } + + var aux *types.Role + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource template +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareTemplate prepares the resources of the given type for encoding +func (e StoreEncoder) prepareTemplate(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.Template, len(nn)) + err = e.matchupTemplates(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareTemplate with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.Template) + if !ok { + panic("unexpected resource type: node expecting type of template") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setTemplateDefaults(res) + if err != nil { + return err + } + + err = e.validateTemplate(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeTemplates encodes a set of resource into the database +func (e StoreEncoder) encodeTemplates(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeTemplate(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeTemplate encodes the resource into the database +func (e StoreEncoder) encodeTemplate(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertTemplate(ctx, s, n.Resource.(*types.Template)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupTemplates returns an index with indicates what resources already exist +func (e StoreEncoder) matchupTemplates(ctx context.Context, s store.Storer, uu map[int]types.Template, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. 
+ // Most resources won't really be that vast so this should be acceptable for now. + aa, _, err := store.SearchTemplates(ctx, s, types.TemplateFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.Template, len(aa)) + strMap := make(map[string]*types.Template, len(aa)) + + for _, a := range aa { + strMap[a.Handle] = a + idMap[a.ID] = a + + } + + var aux *types.Template + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource user +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareUser prepares the resources of the given type for encoding +func (e StoreEncoder) prepareUser(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.User, len(nn)) + err = e.matchupUsers(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareUser with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.User) + if !ok { + panic("unexpected resource type: node expecting type of user") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setUserDefaults(res) + if err != nil { + return err + } + + err = e.validateUser(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeUsers encodes a set of resource into the database +func (e StoreEncoder) encodeUsers(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeUser(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeUser encodes the resource into the database +func (e StoreEncoder) encodeUser(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertUser(ctx, s, n.Resource.(*types.User)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupUsers returns an index with indicates what resources already exist +func (e StoreEncoder) matchupUsers(ctx context.Context, s store.Storer, uu map[int]types.User, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. + // Most resources won't really be that vast so this should be acceptable for now. 
+ aa, _, err := store.SearchUsers(ctx, s, types.UserFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.User, len(aa)) + strMap := make(map[string]*types.User, len(aa)) + + for _, a := range aa { + strMap[a.Handle] = a + idMap[a.ID] = a + + } + + var aux *types.User + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource dalConnection +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareDalConnection prepares the resources of the given type for encoding +func (e StoreEncoder) prepareDalConnection(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.DalConnection, len(nn)) + err = e.matchupDalConnections(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareDalConnection with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.DalConnection) + if !ok { + panic("unexpected resource type: node expecting type of dalConnection") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setDalConnectionDefaults(res) + if err != nil { + return err + } + + err = e.validateDalConnection(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeDalConnections encodes a set of resource into the database +func (e StoreEncoder) encodeDalConnections(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeDalConnection(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeDalConnection encodes the resource into the database +func (e StoreEncoder) encodeDalConnection(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertDalConnection(ctx, s, n.Resource.(*types.DalConnection)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) + + switch rt { + + } + } + + return +} + +// matchupDalConnections returns an index with indicates what resources already exist +func (e StoreEncoder) matchupDalConnections(ctx context.Context, s store.Storer, uu map[int]types.DalConnection, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter then this. 
+ // Most resources won't really be that vast so this should be acceptable for now. + aa, _, err := store.SearchDalConnections(ctx, s, types.DalConnectionFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.DalConnection, len(aa)) + strMap := make(map[string]*types.DalConnection, len(aa)) + + for _, a := range aa { + strMap[a.Handle] = a + idMap[a.ID] = a + + } + + var aux *types.DalConnection + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource dalSensitivityLevel +// // // // // // // // // // // // // // // // // // // // // // // // // + +// prepareDalSensitivityLevel prepares the resources of the given type for encoding +func (e StoreEncoder) prepareDalSensitivityLevel(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet) (err error) { + // Grab an index of already existing resources of this type + // @note since these resources should be fairly low-volume and existing for + // a short time (and because we batch by resource type); fetching them all + // into memory shouldn't hurt too much. + // @todo do some benchmarks and potentially implement some smarter check such as + // a bloom filter or something similar. 
+ + // Initializing the index here (and using a hashmap) so it's not escaped to the heap + existing := make(map[int]types.DalSensitivityLevel, len(nn)) + err = e.matchupDalSensitivityLevels(ctx, s, existing, nn) + if err != nil { + return + } + + for i, n := range nn { + if n.Resource == nil { + panic("unexpected state: cannot call prepareDalSensitivityLevel with nodes without a defined Resource") + } + + res, ok := n.Resource.(*types.DalSensitivityLevel) + if !ok { + panic("unexpected resource type: node expecting type of dalSensitivityLevel") + } + + existing, hasExisting := existing[i] + + if hasExisting { + // On existing, we don't need to re-do identifiers and references; simply + // changing up the internal resource is enough. + // + // In the future, we can pass down the tree and re-do the deps like that + switch p.Config.OnExisting { + case envoyx.OnConflictPanic: + err = fmt.Errorf("resource already exists") + return + + case envoyx.OnConflictReplace: + // Replace; simple ID change should do the trick + res.ID = existing.ID + + case envoyx.OnConflictSkip: + // Replace the node's resource with the fetched one + res = &existing + + // @todo merging + } + } else { + // @todo actually a bottleneck. As per sonyflake docs, it can at most + // generate up to 2**8 (256) IDs per 10ms in a single thread. + // How can we improve this? + res.ID = id.Next() + } + + // We can skip validation/defaults when the resource is overwritten by + // the one already stored (the panic one errors out anyway) since it + // should already be ok. 
+ if !hasExisting || p.Config.OnExisting != envoyx.OnConflictSkip { + err = e.setDalSensitivityLevelDefaults(res) + if err != nil { + return err + } + + err = e.validateDalSensitivityLevel(res) + if err != nil { + return err + } + } + + n.Resource = res + } + + return +} + +// encodeDalSensitivityLevels encodes a set of resource into the database +func (e StoreEncoder) encodeDalSensitivityLevels(ctx context.Context, p envoyx.EncodeParams, s store.Storer, nn envoyx.NodeSet, tree envoyx.Traverser) (err error) { + for _, n := range nn { + err = e.encodeDalSensitivityLevel(ctx, p, s, n, tree) + if err != nil { + return + } + } + + return +} + +// encodeDalSensitivityLevel encodes the resource into the database +func (e StoreEncoder) encodeDalSensitivityLevel(ctx context.Context, p envoyx.EncodeParams, s store.Storer, n *envoyx.Node, tree envoyx.Traverser) (err error) { + // Grab dependency references + var auxID uint64 + for fieldLabel, ref := range n.References { + rn := tree.ParentForRef(n, ref) + if rn == nil { + err = fmt.Errorf("missing node for ref %v", ref) + return + } + + auxID = rn.Resource.GetID() + if auxID == 0 { + err = fmt.Errorf("related resource doesn't provide an ID") + return + } + + err = n.Resource.SetValue(fieldLabel, 0, auxID) + if err != nil { + return + } + } + + // Flush to the DB + err = store.UpsertDalSensitivityLevel(ctx, s, n.Resource.(*types.DalSensitivityLevel)) + if err != nil { + return + } + + // Handle resources nested under it + // + // @todo how can we remove the OmitPlaceholderNodes call the same way we did for + // the root function calls? + + for rt, nn := range envoyx.NodesByResourceType(tree.Children(n)...) { + nn = envoyx.OmitPlaceholderNodes(nn...) 
+ + switch rt { + + } + } + + return +} + +// matchupDalSensitivityLevels returns an index which indicates what resources already exist +func (e StoreEncoder) matchupDalSensitivityLevels(ctx context.Context, s store.Storer, uu map[int]types.DalSensitivityLevel, nn envoyx.NodeSet) (err error) { + // @todo might need to do it smarter than this. + // Most resources won't really be that vast so this should be acceptable for now. + aa, _, err := store.SearchDalSensitivityLevels(ctx, s, types.DalSensitivityLevelFilter{}) + if err != nil { + return + } + + idMap := make(map[uint64]*types.DalSensitivityLevel, len(aa)) + strMap := make(map[string]*types.DalSensitivityLevel, len(aa)) + + for _, a := range aa { + strMap[a.Handle] = a + idMap[a.ID] = a + + } + + var aux *types.DalSensitivityLevel + var ok bool + for i, n := range nn { + for _, idf := range n.Identifiers.Slice { + if id, err := strconv.ParseUint(idf, 10, 64); err == nil { + aux, ok = idMap[id] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + + aux, ok = strMap[idf] + if ok { + uu[i] = *aux + // When any identifier matches we can end it + break + } + } + } + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Utility functions +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e *StoreEncoder) grabStorer(p envoyx.EncodeParams) (s store.Storer, err error) { + auxs, ok := p.Params["storer"] + if !ok { + err = fmt.Errorf("storer not defined") + return + } + + s, ok = auxs.(store.Storer) + if !ok { + err = fmt.Errorf("invalid storer provided") + return + } + + return +} diff --git a/server/system/envoy/store_encode.go b/server/system/envoy/store_encode.go new file mode 100644 index 000000000..8e1602583 --- /dev/null +++ b/server/system/envoy/store_encode.go @@ -0,0 +1,91 @@ +package envoy + +import "github.com/cortezaproject/corteza/server/system/types" + +func (e StoreEncoder) 
setApplicationDefaults(res *types.Application) (err error) { + return +} + +func (e StoreEncoder) validateApplication(res *types.Application) (err error) { + return +} + +func (e StoreEncoder) setApigwRouteDefaults(res *types.ApigwRoute) (err error) { + return +} + +func (e StoreEncoder) validateApigwRoute(res *types.ApigwRoute) (err error) { + return +} + +func (e StoreEncoder) setApigwFilterDefaults(res *types.ApigwFilter) (err error) { + return +} + +func (e StoreEncoder) validateApigwFilter(res *types.ApigwFilter) (err error) { + return +} + +func (e StoreEncoder) setAuthClientDefaults(res *types.AuthClient) (err error) { + return +} + +func (e StoreEncoder) validateAuthClient(res *types.AuthClient) (err error) { + return +} + +func (e StoreEncoder) setQueueDefaults(res *types.Queue) (err error) { + return +} + +func (e StoreEncoder) validateQueue(res *types.Queue) (err error) { + return +} + +func (e StoreEncoder) setReportDefaults(res *types.Report) (err error) { + return +} + +func (e StoreEncoder) validateReport(res *types.Report) (err error) { + return +} + +func (e StoreEncoder) setRoleDefaults(res *types.Role) (err error) { + return +} + +func (e StoreEncoder) validateRole(res *types.Role) (err error) { + return +} + +func (e StoreEncoder) setTemplateDefaults(res *types.Template) (err error) { + return +} + +func (e StoreEncoder) validateTemplate(res *types.Template) (err error) { + return +} + +func (e StoreEncoder) setUserDefaults(res *types.User) (err error) { + return +} + +func (e StoreEncoder) validateUser(res *types.User) (err error) { + return +} + +func (e StoreEncoder) setDalConnectionDefaults(res *types.DalConnection) (err error) { + return +} + +func (e StoreEncoder) validateDalConnection(res *types.DalConnection) (err error) { + return +} + +func (e StoreEncoder) setDalSensitivityLevelDefaults(res *types.DalSensitivityLevel) (err error) { + return +} + +func (e StoreEncoder) validateDalSensitivityLevel(res *types.DalSensitivityLevel) (err 
error) { + return +} diff --git a/server/system/envoy/yaml_decode.gen.go b/server/system/envoy/yaml_decode.gen.go new file mode 100644 index 000000000..0b6757196 --- /dev/null +++ b/server/system/envoy/yaml_decode.gen.go @@ -0,0 +1,2946 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import ( + "context" + "fmt" + "io" + "strings" + + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/pkg/rbac" + "github.com/cortezaproject/corteza/server/pkg/y7s" + "github.com/cortezaproject/corteza/server/system/types" + systemTypes "github.com/cortezaproject/corteza/server/system/types" + "golang.org/x/text/language" + "gopkg.in/yaml.v3" +) + +type ( + // YamlDecoder is responsible for decoding YAML documents into Corteza resources + // which are then managed by envoy and imported via an encoder. + YamlDecoder struct{} + documentContext struct { + references map[string]string + } + auxYamlDoc struct { + nodes envoyx.NodeSet + } +) + +// Decode returns a set of envoy nodes based on the provided params +// +// YamlDecoder expects the DecodeParam of `stream` which conforms +// to the io.Reader interface. 
+func (d YamlDecoder) Decode(ctx context.Context, p envoyx.DecodeParams) (out envoyx.NodeSet, err error) { + // Get the reader + r, err := d.getReader(ctx, p) + if err != nil { + return + } + + // Offload decoding to the aux document + doc := &auxYamlDoc{} + err = yaml.NewDecoder(r).Decode(doc) + if err != nil { + return + } + + return doc.nodes, nil +} + +func (d *auxYamlDoc) UnmarshalYAML(n *yaml.Node) (err error) { + // Get the document context from the root level + dctx, err := d.getDocumentContext(n) + if err != nil { + return + } + + var aux envoyx.NodeSet + return y7s.EachMap(n, func(k, v *yaml.Node) error { + kv := strings.ToLower(k.Value) + + switch kv { + case "application", "apps": + if y7s.IsMapping(v) { + aux, err = d.unmarshalApplicationMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalApplicationSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "apigwroute", "endpoints": + if y7s.IsMapping(v) { + aux, err = d.unmarshalApigwRouteMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalApigwRouteSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "apigwfilter": + if y7s.IsSeq(v) { + aux, err = d.unmarshalApigwFilterSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "authclient", "authclients": + if y7s.IsMapping(v) { + aux, err = d.unmarshalAuthClientMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalAuthClientSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "queue": + if y7s.IsMapping(v) { + aux, err = d.unmarshalQueueMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalQueueSeq(dctx, v) + d.nodes = append(d.nodes, aux...) 
+ } + return err + + case "report", "reports": + if y7s.IsMapping(v) { + aux, err = d.unmarshalReportMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalReportSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "role", "roles": + if y7s.IsMapping(v) { + aux, err = d.unmarshalRoleMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalRoleSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "template", "templates": + if y7s.IsMapping(v) { + aux, err = d.unmarshalTemplateMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalTemplateSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "user", "users", "usr": + if y7s.IsMapping(v) { + aux, err = d.unmarshalUserMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalUserSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "dalconnection", "connection", "connections": + if y7s.IsMapping(v) { + aux, err = d.unmarshalDalConnectionMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalDalConnectionSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + case "dalsensitivitylevel", "sensitivity_level": + if y7s.IsMapping(v) { + aux, err = d.unmarshalDalSensitivityLevelMap(dctx, v) + d.nodes = append(d.nodes, aux...) + return err + } + if y7s.IsSeq(v) { + aux, err = d.unmarshalDalSensitivityLevelSeq(dctx, v) + d.nodes = append(d.nodes, aux...) + } + return err + + // Access control nodes + case "allow": + aux, err = unmarshalAllowNode(v) + d.nodes = append(d.nodes, aux...) + if err != nil { + return err + } + + case "deny": + aux, err = unmarshalDenyNode(v) + d.nodes = append(d.nodes, aux...) 
+ if err != nil { + return err + } + + // Resource translation nodes + case "locale", "translation", "translations", "i18n": + aux, err = unmarshalLocaleNode(v) + d.nodes = append(d.nodes, aux...) + if err != nil { + return err + } + + // Offload to custom handlers + default: + aux, err = d.unmarshalYAML(kv, v) + d.nodes = append(d.nodes, aux...) + if err != nil { + return err + } + } + return nil + }) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource application +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalApplicationSeq unmarshals Application when provided as a sequence node +func (d *auxYamlDoc) unmarshalApplicationSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalApplicationNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalApplicationMap unmarshals Application when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalApplicationMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalApplicationNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalApplicationNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalApplicationNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.Application + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. 
This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. + ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Name", &r.Name) + ii = ii.Add(r.Name) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "ownerid": + // Handle references + err = y7s.DecodeScalar(n, "ownerID", &auxNodeValue) + if err != nil { + return err + } + refs["OwnerID"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) 
+ auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under application such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["ApplicationID"] = envoyx.Ref{ + ResourceType: types.ApplicationResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.ApplicationResourceType, + Identifiers: ii, + References: refs, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. 
+ // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["0"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource apigwRoute +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalApigwRouteSeq unmarshals ApigwRoute when provided as a sequence node +func (d *auxYamlDoc) unmarshalApigwRouteSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalApigwRouteNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalApigwRouteMap unmarshals ApigwRoute when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalApigwRouteMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalApigwRouteNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) 
+ + return nil + }) + + return +} + +// unmarshalApigwRouteNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalApigwRouteNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.ApigwRoute + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. + ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Endpoint", &r.Endpoint) + ii = ii.Add(r.Endpoint) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "createdby": + // Handle references + err = y7s.DecodeScalar(n, "createdBy", &auxNodeValue) + if err != nil { + return err + } + refs["CreatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "deletedby": + // Handle references + err = y7s.DecodeScalar(n, "deletedBy", &auxNodeValue) + if err != nil { + return err + } + refs["DeletedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: 
envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "group": + // Handle references + err = y7s.DecodeScalar(n, "group", &auxNodeValue) + if err != nil { + return err + } + refs["Group"] = envoyx.Ref{ + ResourceType: "corteza::system:apigw-group", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "updatedby": + // Handle references + err = y7s.DecodeScalar(n, "updatedBy", &auxNodeValue) + if err != nil { + return err + } + refs["UpdatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under apigwRoute such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. 
+ for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["ApigwRouteID"] = envoyx.Ref{ + ResourceType: types.ApigwRouteResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.ApigwRouteResourceType, + Identifiers: ii, + References: refs, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["0"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) 
+ + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource apigwFilter +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalApigwFilterSeq unmarshals ApigwFilter when provided as a sequence node +func (d *auxYamlDoc) unmarshalApigwFilterSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalApigwFilterNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalApigwFilterMap unmarshals ApigwFilter when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +// @note this resource does not support map encoding. +// Refer to the corresponding definition files to adjust if needed. + +// unmarshalApigwFilterNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalApigwFilterNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.ApigwFilter + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. 
+ ii := envoyx.Identifiers{} + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "createdby": + // Handle references + err = y7s.DecodeScalar(n, "createdBy", &auxNodeValue) + if err != nil { + return err + } + refs["CreatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "deletedby": + // Handle references + err = y7s.DecodeScalar(n, "deletedBy", &auxNodeValue) + if err != nil { + return err + } + refs["DeletedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "route": + // Handle references + err = y7s.DecodeScalar(n, "route", &auxNodeValue) + if err != nil { + return err + } + refs["Route"] = envoyx.Ref{ + ResourceType: "corteza::system:apigw-route", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "updatedby": + // Handle references + err = y7s.DecodeScalar(n, "updatedBy", &auxNodeValue) + if err != nil { + return err + } + refs["UpdatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + } + + return nil + }) + if err != nil { + return + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under apigwFilter such as a module inside a namespace + // + // This operation is done in the second pass of the 
document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["ApigwFilterID"] = envoyx.Ref{ + ResourceType: types.ApigwFilterResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.ApigwFilterResourceType, + Identifiers: ii, + References: refs, + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource authClient +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalAuthClientSeq unmarshals AuthClient when provided as a sequence node +func (d *auxYamlDoc) unmarshalAuthClientSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalAuthClientNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) 
+ + return nil + }) + + return +} + +// unmarshalAuthClientMap unmarshals AuthClient when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalAuthClientMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalAuthClientNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalAuthClientNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalAuthClientNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.AuthClient + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. 
+ ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Handle", &r.Handle) + ii = ii.Add(r.Handle) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "createdby": + // Handle references + err = y7s.DecodeScalar(n, "createdBy", &auxNodeValue) + if err != nil { + return err + } + refs["CreatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "deletedby": + // Handle references + err = y7s.DecodeScalar(n, "deletedBy", &auxNodeValue) + if err != nil { + return err + } + refs["DeletedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "handle": + // Handle identifiers + err = y7s.DecodeScalar(n, "handle", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "ownedby": + // Handle references + err = y7s.DecodeScalar(n, "ownedBy", &auxNodeValue) + if err != nil { + return err + } + refs["OwnedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "updatedby": + // Handle references + err = y7s.DecodeScalar(n, "updatedBy", &auxNodeValue) + if err != nil { + return err + } + refs["UpdatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + 
+ break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Define the scope + // + // This resource is scoped with no parent resources so this resource is the + // root itself (generally the namespace -- the only currently supported scenario). + scope = envoyx.Scope{ + ResourceType: types.AuthClientResourceType, + Identifiers: ii, + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under authClient such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. 
+ // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["AuthClientID"] = envoyx.Ref{ + ResourceType: types.AuthClientResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.AuthClientResourceType, + Identifiers: ii, + References: refs, + + Scope: scope, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["0"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) 
+ + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource queue +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalQueueSeq unmarshals Queue when provided as a sequence node +func (d *auxYamlDoc) unmarshalQueueSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalQueueNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalQueueMap unmarshals Queue when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalQueueMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalQueueNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalQueueNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalQueueNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.Queue + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. 
+ // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. + ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Queue", &r.Queue) + ii = ii.Add(r.Queue) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "createdby": + // Handle references + err = y7s.DecodeScalar(n, "createdBy", &auxNodeValue) + if err != nil { + return err + } + refs["CreatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "deletedby": + // Handle references + err = y7s.DecodeScalar(n, "deletedBy", &auxNodeValue) + if err != nil { + return err + } + refs["DeletedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "queue": + // Handle identifiers + err = y7s.DecodeScalar(n, "queue", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "updatedby": + // Handle references + err = y7s.DecodeScalar(n, "updatedBy", &auxNodeValue) + if err != nil { + return err + } + refs["UpdatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { 
+ return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Define the scope + // + // This resource is scoped with no parent resources so this resource is the + // root itself (generally the namespace -- the only currently supported scenario). + scope = envoyx.Scope{ + ResourceType: types.QueueResourceType, + Identifiers: ii, + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under queue such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["QueueID"] = envoyx.Ref{ + ResourceType: types.QueueResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) 
+ return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.QueueResourceType, + Identifiers: ii, + References: refs, + + Scope: scope, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["0"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource report +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalReportSeq unmarshals Report when provided as a sequence node +func (d *auxYamlDoc) unmarshalReportSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalReportNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalReportMap unmarshals Report when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. 
+// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalReportMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalReportNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalReportNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalReportNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.Report + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. 
+ ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Handle", &r.Handle) + ii = ii.Add(r.Handle) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "createdby": + // Handle references + err = y7s.DecodeScalar(n, "createdBy", &auxNodeValue) + if err != nil { + return err + } + refs["CreatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "deletedby": + // Handle references + err = y7s.DecodeScalar(n, "deletedBy", &auxNodeValue) + if err != nil { + return err + } + refs["DeletedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "handle": + // Handle identifiers + err = y7s.DecodeScalar(n, "handle", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "ownedby": + // Handle references + err = y7s.DecodeScalar(n, "ownedBy", &auxNodeValue) + if err != nil { + return err + } + refs["OwnedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "updatedby": + // Handle references + err = y7s.DecodeScalar(n, "updatedBy", &auxNodeValue) + if err != nil { + return err + } + refs["UpdatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + 
+ break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under report such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["ReportID"] = envoyx.Ref{ + ResourceType: types.ReportResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) 
+ return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.ReportResourceType, + Identifiers: ii, + References: refs, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["0"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource role +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalRoleSeq unmarshals Role when provided as a sequence node +func (d *auxYamlDoc) unmarshalRoleSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalRoleNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalRoleMap unmarshals Role when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. 
+// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalRoleMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalRoleNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalRoleNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalRoleNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.Role + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. 
+ ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Handle", &r.Handle) + ii = ii.Add(r.Handle) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "handle": + // Handle identifiers + err = y7s.DecodeScalar(n, "handle", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under role such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. 
+ err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["RoleID"] = envoyx.Ref{ + ResourceType: types.RoleResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.RoleResourceType, + Identifiers: ii, + References: refs, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["0"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) 
+ + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource template +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalTemplateSeq unmarshals Template when provided as a sequence node +func (d *auxYamlDoc) unmarshalTemplateSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalTemplateNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalTemplateMap unmarshals Template when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalTemplateMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalTemplateNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalTemplateNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalTemplateNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.Template + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. 
+ // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. + ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Handle", &r.Handle) + ii = ii.Add(r.Handle) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "handle": + // Handle identifiers + err = y7s.DecodeScalar(n, "handle", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "ownerid": + // Handle references + err = y7s.DecodeScalar(n, "ownerID", &auxNodeValue) + if err != nil { + return err + } + refs["OwnerID"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) 
+ auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under template such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["TemplateID"] = envoyx.Ref{ + ResourceType: types.TemplateResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.TemplateResourceType, + Identifiers: ii, + References: refs, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. 
+ // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["0"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource user +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalUserSeq unmarshals User when provided as a sequence node +func (d *auxYamlDoc) unmarshalUserSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalUserNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalUserMap unmarshals User when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalUserMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalUserNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalUserNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalUserNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.User + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. 
This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. + ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Handle", &r.Handle) + ii = ii.Add(r.Handle) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "handle": + // Handle identifiers + err = y7s.DecodeScalar(n, "handle", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) 
+ auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under user such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["UserID"] = envoyx.Ref{ + ResourceType: types.UserResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.UserResourceType, + Identifiers: ii, + References: refs, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. 
+ // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["0"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource dalConnection +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalDalConnectionSeq unmarshals DalConnection when provided as a sequence node +func (d *auxYamlDoc) unmarshalDalConnectionSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalDalConnectionNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalDalConnectionMap unmarshals DalConnection when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalDalConnectionMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalDalConnectionNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) 
+ + return nil + }) + + return +} + +// unmarshalDalConnectionNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalDalConnectionNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.DalConnection + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. + // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. + ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Handle", &r.Handle) + ii = ii.Add(r.Handle) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + rbacNodes envoyx.NodeSet + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "createdby": + // Handle references + err = y7s.DecodeScalar(n, "createdBy", &auxNodeValue) + if err != nil { + return err + } + refs["CreatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "deletedby": + // Handle references + err = y7s.DecodeScalar(n, "deletedBy", &auxNodeValue) + if err != nil { + return err + } + refs["DeletedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: 
envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "handle": + // Handle identifiers + err = y7s.DecodeScalar(n, "handle", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "updatedby": + // Handle references + err = y7s.DecodeScalar(n, "updatedBy", &auxNodeValue) + if err != nil { + return err + } + refs["UpdatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + // Handle RBAC rules + case "allow": + auxOut, err = unmarshalAllowNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + + case "deny": + auxOut, err = unmarshalDenyNode(n) + if err != nil { + return err + } + rbacNodes = append(rbacNodes, auxOut...) + auxOut = nil + } + + return nil + }) + if err != nil { + return + } + + // Apply the scope to all of the references of the same type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under dalConnection such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. 
+ // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["DalConnectionID"] = envoyx.Ref{ + ResourceType: types.DalConnectionResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.DalConnectionResourceType, + Identifiers: ii, + References: refs, + } + // Update RBAC resource nodes with references regarding the resource + for _, rn := range rbacNodes { + // Since the rule belongs to the resource, it will have the same + // subset of references as the parent resource. + rn.References = envoyx.MergeRefs(rn.References, a.References) + + // The RBAC rule's most specific identifier is the resource itself. + // Using this we can hardcode it to point to the location after the parent resource. + // + // @todo consider using a more descriptive identifier for the position + // such as `index-%d`. + rn.References["0"] = envoyx.Ref{ + ResourceType: a.ResourceType, + Identifiers: a.Identifiers, + Scope: scope, + } + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) + out = append(out, rbacNodes...) 
+ + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource dalSensitivityLevel +// // // // // // // // // // // // // // // // // // // // // // // // // + +// unmarshalDalSensitivityLevelSeq unmarshals DalSensitivityLevel when provided as a sequence node +func (d *auxYamlDoc) unmarshalDalSensitivityLevelSeq(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachSeq(n, func(n *yaml.Node) error { + aux, err = d.unmarshalDalSensitivityLevelNode(dctx, n) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalDalSensitivityLevelMap unmarshals DalSensitivityLevel when provided as a mapping node +// +// When map encoded, the map key is used as a preset identifier. +// The identifier is passed to the node function as a meta node +func (d *auxYamlDoc) unmarshalDalSensitivityLevelMap(dctx documentContext, n *yaml.Node) (out envoyx.NodeSet, err error) { + var aux envoyx.NodeSet + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + aux, err = d.unmarshalDalSensitivityLevelNode(dctx, n, k) + if err != nil { + return err + } + out = append(out, aux...) + + return nil + }) + + return +} + +// unmarshalDalSensitivityLevelNode is a cookie-cutter function to unmarshal +// the yaml node into the corresponding Corteza type & Node +func (d *auxYamlDoc) unmarshalDalSensitivityLevelNode(dctx documentContext, n *yaml.Node, meta ...*yaml.Node) (out envoyx.NodeSet, err error) { + var r *types.DalSensitivityLevel + + // @todo we're omitting errors because there will be a bunch due to invalid + // resource field types. This might be a bit unstable as other errors may + // also get ignored. + // + // A potential fix would be to firstly unmarshal into an any, check errors + // and then unmarshal into the resource while omitting errors. + n.Decode(&r) + + // Identifiers are determined manually when iterating the yaml node. 
+ // This is to help assure there are no duplicates and everything + // was accounted for especially when working with aliases such as + // user_name instead of userName. + ii := envoyx.Identifiers{} + + // When a resource supports mapped input, the key is passed as meta which + // needs to be registered as an identifier (since it is) + if len(meta) > 0 { + y7s.DecodeScalar(meta[0], "Handle", &r.Handle) + ii = ii.Add(r.Handle) + } + + var ( + refs = make(map[string]envoyx.Ref) + auxOut envoyx.NodeSet + nestedNodes envoyx.NodeSet + scope envoyx.Scope + ) + _ = auxOut + _ = refs + + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + var auxNodeValue any + _ = auxNodeValue + + switch strings.ToLower(k.Value) { + + case "createdby": + // Handle references + err = y7s.DecodeScalar(n, "createdBy", &auxNodeValue) + if err != nil { + return err + } + refs["CreatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "deletedby": + // Handle references + err = y7s.DecodeScalar(n, "deletedBy", &auxNodeValue) + if err != nil { + return err + } + refs["DeletedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + case "handle": + // Handle identifiers + err = y7s.DecodeScalar(n, "handle", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "id": + // Handle identifiers + err = y7s.DecodeScalar(n, "id", &auxNodeValue) + if err != nil { + return err + } + ii = ii.Add(auxNodeValue) + + break + + case "updatedby": + // Handle references + err = y7s.DecodeScalar(n, "updatedBy", &auxNodeValue) + if err != nil { + return err + } + refs["UpdatedBy"] = envoyx.Ref{ + ResourceType: "corteza::system:user", + Identifiers: envoyx.MakeIdentifiers(auxNodeValue), + } + + break + + } + + return nil + }) + if err != nil { + return + } + + // Apply the scope to all of the references of the same 
type + for k, ref := range refs { + if ref.ResourceType != scope.ResourceType { + continue + } + ref.Scope = scope + refs[k] = ref + } + + // Handle any resources that could be inserted under dalSensitivityLevel such as a module inside a namespace + // + // This operation is done in the second pass of the document so we have + // the complete context of the current resource; such as the identifier, + // references, and scope. + err = y7s.EachMap(n, func(k, n *yaml.Node) error { + nestedNodes = nil + + switch strings.ToLower(k.Value) { + + } + + // Iterate nested nodes and update their reference to the current resource + // + // Any reference to the parent resource from the child resource is overwritten + // to avoid potential user-error edge cases. + for _, a := range nestedNodes { + // @note all nested resources fall under the same component and the same scope. + // Simply assign the same scope to all -- if it shouldn't be scoped + // the parent won't have it (saving CPU ticks :) + a.Scope = scope + + if a.References == nil { + a.References = make(map[string]envoyx.Ref) + } + + a.References["DalSensitivityLevelID"] = envoyx.Ref{ + ResourceType: types.DalSensitivityLevelResourceType, + Identifiers: ii, + Scope: scope, + } + + for f, ref := range refs { + a.References[f] = ref + } + } + auxOut = append(auxOut, nestedNodes...) + return nil + }) + if err != nil { + return + } + + a := &envoyx.Node{ + Resource: r, + + ResourceType: types.DalSensitivityLevelResourceType, + Identifiers: ii, + References: refs, + } + + // Put it all together... + out = append(out, a) + out = append(out, auxOut...) 
+ + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// RBAC unmarshal logic +// // // // // // // // // // // // // // // // // // // // // // // // // + +func unmarshalAllowNode(n *yaml.Node) (out envoyx.NodeSet, err error) { + return unmarshalRBACNode(n, rbac.Allow) +} + +func unmarshalDenyNode(n *yaml.Node) (out envoyx.NodeSet, err error) { + return unmarshalRBACNode(n, rbac.Deny) +} + +func unmarshalRBACNode(n *yaml.Node, acc rbac.Access) (out envoyx.NodeSet, err error) { + if y7s.IsMapping(n.Content[1]) { + return unmarshalNestedRBACNode(n, acc) + } + + return unmarshalFlatRBACNode(n, acc) +} + +// unmarshalNestedRBACNode handles RBAC rules when they are nested inside a resource +// +// The edge-case exists since the node doesn't explicitly specify the resource +// it belongs to. +// +// Example: +// +// modules: +// module1: +// name: "module 1" +// fields: ... +// allow: +// role1: +// - read +// - delete +func unmarshalNestedRBACNode(n *yaml.Node, acc rbac.Access) (out envoyx.NodeSet, err error) { + // Handles role + return out, y7s.EachMap(n, func(role, perm *yaml.Node) error { + // Handles operation + return y7s.EachMap(perm, func(res, op *yaml.Node) error { + out = append(out, &envoyx.Node{ + Resource: &rbac.Rule{ + Resource: res.Value, + Operation: op.Value, + Access: acc, + }, + ResourceType: rbac.RuleResourceType, + References: envoyx.MergeRefs( + map[string]envoyx.Ref{"RoleID": { + // Providing resource type as plain text to reduce cross component references + ResourceType: "corteza::system:role", + Identifiers: envoyx.MakeIdentifiers(role.Value), + }}, + envoyx.SplitResourceIdentifier(res.Value), + ), + }) + return nil + }) + }) +} + +// unmarshalFlatRBACNode handles RBAC rules when they are provided on the root level +// +// Example: +// +// allow: +// role1: +// corteza::system/: +// - users.search +// - users.create +func unmarshalFlatRBACNode(n *yaml.Node, acc rbac.Access) (out envoyx.NodeSet, err error) 
{ + return out, y7s.EachMap(n, func(role, op *yaml.Node) error { + out = append(out, &envoyx.Node{ + Resource: &rbac.Rule{ + Operation: op.Value, + Access: acc, + }, + ResourceType: rbac.RuleResourceType, + References: map[string]envoyx.Ref{ + "RoleID": { + // Providing resource type as plain text to reduce cross component references + ResourceType: "corteza::system:role", + Identifiers: envoyx.MakeIdentifiers(role.Value), + }, + }, + }) + return nil + }) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// i18n unmarshal logic +// // // // // // // // // // // // // // // // // // // // // // // // // + +func unmarshalLocaleNode(n *yaml.Node) (out envoyx.NodeSet, err error) { + return out, y7s.EachMap(n, func(lang, loc *yaml.Node) error { + langTag := systemTypes.Lang{Tag: language.Make(lang.Value)} + + return y7s.EachMap(loc, func(res, kv *yaml.Node) error { + return y7s.EachMap(kv, func(k, msg *yaml.Node) error { + out = append(out, &envoyx.Node{ + Resource: &systemTypes.ResourceTranslation{ + Lang: langTag, + K: k.Value, + Message: msg.Value, + }, + // Providing resource type as plain text to reduce cross component references + ResourceType: "corteza::system:resource-translation", + References: envoyx.SplitResourceIdentifier(res.Value), + }) + return nil + }) + }) + }) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Utilities +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (d YamlDecoder) getReader(ctx context.Context, p envoyx.DecodeParams) (r io.Reader, err error) { + aux, ok := p.Params["stream"] + if ok { + r, ok = aux.(io.Reader) + if ok { + return + } + } + + // @todo consider adding support for managing files from a location + err = fmt.Errorf("YAML decoder expects a stream conforming to io.Reader interface") + return +} + +func (d *auxYamlDoc) getDocumentContext(n *yaml.Node) (dctx documentContext, err error) { + dctx = documentContext{ + references: 
make(map[string]string), + } + + err = y7s.EachMap(n, func(k, v *yaml.Node) error { + // @todo expand when needed. The previous implementation only supported + // namespaces on the root of the document. + + if y7s.IsKind(v, yaml.ScalarNode) { + dctx.references[k.Value] = v.Value + } + + return nil + }) + + return +} diff --git a/server/system/envoy/yaml_decode.go b/server/system/envoy/yaml_decode.go new file mode 100644 index 000000000..bea5b9329 --- /dev/null +++ b/server/system/envoy/yaml_decode.go @@ -0,0 +1,10 @@ +package envoy + +import ( + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "gopkg.in/yaml.v3" +) + +func (d *auxYamlDoc) unmarshalYAML(k string, n *yaml.Node) (out envoyx.NodeSet, err error) { + return +} diff --git a/server/system/envoy/yaml_encode.gen.go b/server/system/envoy/yaml_encode.gen.go new file mode 100644 index 000000000..02b98a4f9 --- /dev/null +++ b/server/system/envoy/yaml_encode.gen.go @@ -0,0 +1,1086 @@ +package envoy + +// This file is auto-generated. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/cortezaproject/corteza/server/pkg/envoyx" + "github.com/cortezaproject/corteza/server/pkg/y7s" + "github.com/cortezaproject/corteza/server/system/types" + "gopkg.in/yaml.v3" +) + +type ( + // YamlEncoder is responsible for encoding Corteza resources into + // a YAML supported format + YamlEncoder struct{} +) + +// Encode encodes the given Corteza resources into some YAML supported format +// +// Encoding should not do any additional processing apart from matching with +// dependencies and runtime validation +// +// Preparation runs validation, default value initialization, matching with +// already existing instances, ... +// +// The prepare function receives a set of nodes grouped by the resource type. 
+// This enables some batching optimization and simplifications when it comes to +// matching with existing resources. +// +// Prepare does not receive any placeholder nodes which are used solely +// for dependency resolution. +func (e YamlEncoder) Encode(ctx context.Context, p envoyx.EncodeParams, rt string, nodes envoyx.NodeSet, tt envoyx.Traverser) (err error) { + var ( + out *yaml.Node + aux *yaml.Node + ) + _ = aux + + w, err := e.getWriter(p) + if err != nil { + return + } + + switch rt { + case types.ApplicationResourceType: + aux, err = e.encodeApplications(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "application", aux) + if err != nil { + return + } + case types.ApigwRouteResourceType: + aux, err = e.encodeApigwRoutes(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "apigwRoute", aux) + if err != nil { + return + } + case types.ApigwFilterResourceType: + aux, err = e.encodeApigwFilters(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "apigwFilter", aux) + if err != nil { + return + } + case types.AuthClientResourceType: + aux, err = e.encodeAuthClients(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "authClient", aux) + if err != nil { + return + } + + case types.QueueResourceType: + aux, err = e.encodeQueues(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "queue", aux) + if err != nil { + return + } + + case types.ReportResourceType: + aux, err = e.encodeReports(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "report", aux) + if err != nil { + return + 
} + + case types.RoleResourceType: + aux, err = e.encodeRoles(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "role", aux) + if err != nil { + return + } + + case types.TemplateResourceType: + aux, err = e.encodeTemplates(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "template", aux) + if err != nil { + return + } + case types.UserResourceType: + aux, err = e.encodeUsers(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "user", aux) + if err != nil { + return + } + case types.DalConnectionResourceType: + aux, err = e.encodeDalConnections(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "dalConnection", aux) + if err != nil { + return + } + case types.DalSensitivityLevelResourceType: + aux, err = e.encodeDalSensitivityLevels(ctx, p, nodes, tt) + if err != nil { + return + } + // Root level resources are always encoded as a map + out, err = y7s.AddMap(out, "dalSensitivityLevel", aux) + if err != nil { + return + } + } + + return yaml.NewEncoder(w).Encode(out) +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource application +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeApplications(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeApplication(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeApplication focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) 
encodeApplication(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.Application) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + + auxOwnerID, err := e.encodeRef(p, res.OwnerID, "OwnerID", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "createdAt", auxCreatedAt, + "deletedAt", auxDeletedAt, + "enabled", res.Enabled, + "id", res.ID, + "name", res.Name, + "ownerID", auxOwnerID, + "unify", res.Unify, + "updatedAt", auxUpdatedAt, + "weight", res.Weight, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource apigwRoute +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeApigwRoutes(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeApigwRoute(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeApigwRoute focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeApigwRoute(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.ApigwRoute) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + auxCreatedAt, err := e.encodeTimestamp(p, 
res.CreatedAt) + if err != nil { + return + } + auxCreatedBy, err := e.encodeRef(p, res.CreatedBy, "CreatedBy", node, tt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + auxDeletedBy, err := e.encodeRef(p, res.DeletedBy, "DeletedBy", node, tt) + if err != nil { + return + } + + auxGroup, err := e.encodeRef(p, res.Group, "Group", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + auxUpdatedBy, err := e.encodeRef(p, res.UpdatedBy, "UpdatedBy", node, tt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "createdAt", auxCreatedAt, + "createdBy", auxCreatedBy, + "deletedAt", auxDeletedAt, + "deletedBy", auxDeletedBy, + "enabled", res.Enabled, + "endpoint", res.Endpoint, + "group", auxGroup, + "id", res.ID, + "meta", res.Meta, + "method", res.Method, + "updatedAt", auxUpdatedAt, + "updatedBy", auxUpdatedBy, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource apigwFilter +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeApigwFilters(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeApigwFilter(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeApigwFilter focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeApigwFilter(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.ApigwFilter) + + // Pre-compute some map values so we can 
omit error checking when encoding yaml nodes + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxCreatedBy, err := e.encodeRef(p, res.CreatedBy, "CreatedBy", node, tt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + auxDeletedBy, err := e.encodeRef(p, res.DeletedBy, "DeletedBy", node, tt) + if err != nil { + return + } + + auxRoute, err := e.encodeRef(p, res.Route, "Route", node, tt) + if err != nil { + return + } + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + auxUpdatedBy, err := e.encodeRef(p, res.UpdatedBy, "UpdatedBy", node, tt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "createdAt", auxCreatedAt, + "createdBy", auxCreatedBy, + "deletedAt", auxDeletedAt, + "deletedBy", auxDeletedBy, + "enabled", res.Enabled, + "id", res.ID, + "kind", res.Kind, + "params", res.Params, + "ref", res.Ref, + "route", auxRoute, + "updatedAt", auxUpdatedAt, + "updatedBy", auxUpdatedBy, + "weight", res.Weight, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource authClient +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeAuthClients(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeAuthClient(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeAuthClient focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeAuthClient(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err 
error) { + res := node.Resource.(*types.AuthClient) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxCreatedBy, err := e.encodeRef(p, res.CreatedBy, "CreatedBy", node, tt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + auxDeletedBy, err := e.encodeRef(p, res.DeletedBy, "DeletedBy", node, tt) + if err != nil { + return + } + + auxExpiresAt, err := e.encodeTimestampNil(p, res.ExpiresAt) + if err != nil { + return + } + + auxOwnedBy, err := e.encodeRef(p, res.OwnedBy, "OwnedBy", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + auxUpdatedBy, err := e.encodeRef(p, res.UpdatedBy, "UpdatedBy", node, tt) + if err != nil { + return + } + auxValidFrom, err := e.encodeTimestampNil(p, res.ValidFrom) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "createdAt", auxCreatedAt, + "createdBy", auxCreatedBy, + "deletedAt", auxDeletedAt, + "deletedBy", auxDeletedBy, + "enabled", res.Enabled, + "expiresAt", auxExpiresAt, + "handle", res.Handle, + "id", res.ID, + "meta", res.Meta, + "ownedBy", auxOwnedBy, + "redirectURI", res.RedirectURI, + "scope", res.Scope, + "secret", res.Secret, + "security", res.Security, + "trusted", res.Trusted, + "updatedAt", auxUpdatedAt, + "updatedBy", auxUpdatedBy, + "validFrom", auxValidFrom, + "validGrant", res.ValidGrant, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource queue +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeQueues(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out 
*yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeQueue(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeQueue focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeQueue(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.Queue) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxCreatedBy, err := e.encodeRef(p, res.CreatedBy, "CreatedBy", node, tt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + auxDeletedBy, err := e.encodeRef(p, res.DeletedBy, "DeletedBy", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + auxUpdatedBy, err := e.encodeRef(p, res.UpdatedBy, "UpdatedBy", node, tt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "consumer", res.Consumer, + "createdAt", auxCreatedAt, + "createdBy", auxCreatedBy, + "deletedAt", auxDeletedAt, + "deletedBy", auxDeletedBy, + "id", res.ID, + "meta", res.Meta, + "queue", res.Queue, + "updatedAt", auxUpdatedAt, + "updatedBy", auxUpdatedBy, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource report +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeReports(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = 
e.encodeReport(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeReport focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeReport(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.Report) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxCreatedBy, err := e.encodeRef(p, res.CreatedBy, "CreatedBy", node, tt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + auxDeletedBy, err := e.encodeRef(p, res.DeletedBy, "DeletedBy", node, tt) + if err != nil { + return + } + + auxOwnedBy, err := e.encodeRef(p, res.OwnedBy, "OwnedBy", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + auxUpdatedBy, err := e.encodeRef(p, res.UpdatedBy, "UpdatedBy", node, tt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "blocks", res.Blocks, + "createdAt", auxCreatedAt, + "createdBy", auxCreatedBy, + "deletedAt", auxDeletedAt, + "deletedBy", auxDeletedBy, + "handle", res.Handle, + "id", res.ID, + "meta", res.Meta, + "ownedBy", auxOwnedBy, + "scenarios", res.Scenarios, + "sources", res.Sources, + "updatedAt", auxUpdatedAt, + "updatedBy", auxUpdatedBy, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource role +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeRoles(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) 
(out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeRole(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeRole focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeRole(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.Role) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + auxArchivedAt, err := e.encodeTimestampNil(p, res.ArchivedAt) + if err != nil { + return + } + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "archivedAt", auxArchivedAt, + "createdAt", auxCreatedAt, + "deletedAt", auxDeletedAt, + "handle", res.Handle, + "id", res.ID, + "meta", res.Meta, + "name", res.Name, + "updatedAt", auxUpdatedAt, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource template +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeTemplates(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeTemplate(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeTemplate focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeTemplate(ctx context.Context, 
p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.Template) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + + auxLastUsedAt, err := e.encodeTimestampNil(p, res.LastUsedAt) + if err != nil { + return + } + + auxOwnerID, err := e.encodeRef(p, res.OwnerID, "OwnerID", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "createdAt", auxCreatedAt, + "deletedAt", auxDeletedAt, + "handle", res.Handle, + "id", res.ID, + "language", res.Language, + "lastUsedAt", auxLastUsedAt, + "meta", res.Meta, + "ownerID", auxOwnerID, + "partial", res.Partial, + "template", res.Template, + "type", res.Type, + "updatedAt", auxUpdatedAt, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource user +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeUsers(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeUser(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeUser focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeUser(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.User) + + // Pre-compute some map values so we 
can omit error checking when encoding yaml nodes + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + + auxSuspendedAt, err := e.encodeTimestampNil(p, res.SuspendedAt) + if err != nil { + return + } + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "createdAt", auxCreatedAt, + "deletedAt", auxDeletedAt, + "email", res.Email, + "emailConfirmed", res.EmailConfirmed, + "handle", res.Handle, + "id", res.ID, + "kind", res.Kind, + "meta", res.Meta, + "name", res.Name, + "suspendedAt", auxSuspendedAt, + "updatedAt", auxUpdatedAt, + "username", res.Username, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource dalConnection +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeDalConnections(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeDalConnection(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeDalConnection focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeDalConnection(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.DalConnection) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxCreatedBy, err := e.encodeRef(p, res.CreatedBy, "CreatedBy", node, tt) + 
if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + auxDeletedBy, err := e.encodeRef(p, res.DeletedBy, "DeletedBy", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + auxUpdatedBy, err := e.encodeRef(p, res.UpdatedBy, "UpdatedBy", node, tt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "config", res.Config, + "createdAt", auxCreatedAt, + "createdBy", auxCreatedBy, + "deletedAt", auxDeletedAt, + "deletedBy", auxDeletedBy, + "handle", res.Handle, + "id", res.ID, + "meta", res.Meta, + "type", res.Type, + "updatedAt", auxUpdatedAt, + "updatedBy", auxUpdatedBy, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Functions for resource dalSensitivityLevel +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeDalSensitivityLevels(ctx context.Context, p envoyx.EncodeParams, nodes envoyx.NodeSet, tt envoyx.Traverser) (out *yaml.Node, err error) { + var aux *yaml.Node + for _, n := range nodes { + aux, err = e.encodeDalSensitivityLevel(ctx, p, n, tt) + if err != nil { + return + } + + out, err = y7s.AddSeq(out, aux) + if err != nil { + return + } + } + + return +} + +// encodeDalSensitivityLevel focuses on the specific resource invoked by the Encode method +func (e YamlEncoder) encodeDalSensitivityLevel(ctx context.Context, p envoyx.EncodeParams, node *envoyx.Node, tt envoyx.Traverser) (out *yaml.Node, err error) { + res := node.Resource.(*types.DalSensitivityLevel) + + // Pre-compute some map values so we can omit error checking when encoding yaml nodes + auxCreatedAt, err := e.encodeTimestamp(p, res.CreatedAt) + if err != nil { + return + } + auxCreatedBy, err := e.encodeRef(p, res.CreatedBy, "CreatedBy", 
node, tt) + if err != nil { + return + } + auxDeletedAt, err := e.encodeTimestampNil(p, res.DeletedAt) + if err != nil { + return + } + auxDeletedBy, err := e.encodeRef(p, res.DeletedBy, "DeletedBy", node, tt) + if err != nil { + return + } + + auxUpdatedAt, err := e.encodeTimestampNil(p, res.UpdatedAt) + if err != nil { + return + } + auxUpdatedBy, err := e.encodeRef(p, res.UpdatedBy, "UpdatedBy", node, tt) + if err != nil { + return + } + + out, err = y7s.AddMap(out, + "createdAt", auxCreatedAt, + "createdBy", auxCreatedBy, + "deletedAt", auxDeletedAt, + "deletedBy", auxDeletedBy, + "handle", res.Handle, + "id", res.ID, + "level", res.Level, + "meta", res.Meta, + "updatedAt", auxUpdatedAt, + "updatedBy", auxUpdatedBy, + ) + if err != nil { + return + } + + // Handle nested resources + var aux *yaml.Node + _ = aux + + return +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Encoding utils +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) encodeTimestamp(p envoyx.EncodeParams, t time.Time) (any, error) { + if t.IsZero() { + return nil, nil + } + + tz := p.Config.PreferredTimezone + if tz != "" { + tzL, err := time.LoadLocation(tz) + if err != nil { + return nil, err + } + t = t.In(tzL) + } + + ly := p.Config.PreferredTimeLayout + if ly == "" { + ly = time.RFC3339 + } + + return t.Format(ly), nil +} + +func (e YamlEncoder) encodeTimestampNil(p envoyx.EncodeParams, t *time.Time) (any, error) { + if t == nil { + return nil, nil + } + + // @todo timestamp encoding format + return e.encodeTimestamp(p, *t) +} + +func (e YamlEncoder) encodeRef(p envoyx.EncodeParams, id uint64, field string, node *envoyx.Node, tt envoyx.Traverser) (any, error) { + parent := tt.ParentForRef(node, node.References[field]) + + // @todo should we panic instead? 
+ // for now gracefully fallback to the ID + if parent == nil { + return id, nil + } + + return node.Identifiers.FriendlyIdentifier(), nil +} + +// // // // // // // // // // // // // // // // // // // // // // // // // +// Utility functions +// // // // // // // // // // // // // // // // // // // // // // // // // + +func (e YamlEncoder) getWriter(p envoyx.EncodeParams) (out io.Writer, err error) { + aux, ok := p.Params["writer"] + if ok { + out, ok = aux.(io.Writer) + if ok { + return + } + } + + // @todo consider adding support for managing files from a location + err = fmt.Errorf("YAML encoder expects a writer conforming to io.Writer interface") + return +} diff --git a/server/system/queue.cue b/server/system/queue.cue index db18accc1..b41316d29 100644 --- a/server/system/queue.cue +++ b/server/system/queue.cue @@ -22,6 +22,9 @@ queue: { sortable: true, goType: "string" dal: {} + envoy: { + identifier: true + } } meta: { goType: "types.QueueMeta" @@ -45,14 +48,28 @@ queue: { filter: { struct: { + queue_id: {goType: "uint64", ident: "queueID", storeIdent: "id"} query: {goType: "string"} deleted: {goType: "filter.State", storeIdent: "deleted_at"} } query: ["queue", "consumer"] + byValue: ["queue_id"] byNilState: ["deleted"] } + envoy: { + scoped: true + yaml: { + supportMappedInput: true + mappedField: "Queue" + identKeyAlias: [] + } + store: { + handleField: "" + } + } + rbac: { operations: { "read": description: "Read queue" diff --git a/server/system/queue_message.cue b/server/system/queue_message.cue index 0e38f2b34..570f17e88 100644 --- a/server/system/queue_message.cue +++ b/server/system/queue_message.cue @@ -32,6 +32,10 @@ queue_message: { } } + envoy: { + omit: true + } + filter: { struct: { queue: {} diff --git a/server/system/reminder.cue b/server/system/reminder.cue index 6a555ca7e..f19e052a4 100644 --- a/server/system/reminder.cue +++ b/server/system/reminder.cue @@ -44,6 +44,10 @@ reminder: { } } + envoy: { + omit: true + } + filter: { struct: { 
reminder_id: {goType: "[]uint64", ident: "reminderID", storeIdent: "id"} diff --git a/server/system/report.cue b/server/system/report.cue index 873ec5424..888017745 100644 --- a/server/system/report.cue +++ b/server/system/report.cue @@ -48,6 +48,15 @@ report: { } } + envoy: { + yaml: { + supportMappedInput: true + mappedField: "Handle" + identKeyAlias: ["reports"] + } + store: {} + } + filter: { struct: { report_id: {goType: "[]uint64", storeIdent: "id", ident: "reportID" } diff --git a/server/system/resource_translation.cue b/server/system/resource_translation.cue index 2ded41e4d..94173ee6f 100644 --- a/server/system/resource_translation.cue +++ b/server/system/resource_translation.cue @@ -52,6 +52,11 @@ resource_translation: { } } + envoy: { + // Special handling for i18n + omit: true + } + filter: { struct: { translation_id: {goType: "[]uint64", ident: "translationID" } diff --git a/server/system/rest/dal_connection.go b/server/system/rest/dal_connection.go index b6027d3b1..feb1f2b62 100644 --- a/server/system/rest/dal_connection.go +++ b/server/system/rest/dal_connection.go @@ -77,9 +77,9 @@ func (ctrl DalConnection) List(ctx context.Context, r *request.DalConnectionList dalConnections types.DalConnectionSet f = types.DalConnectionFilter{ - ConnectionID: payload.ParseUint64s(r.ConnectionID), - Handle: r.Handle, - Type: r.Type, + DalConnectionID: payload.ParseUint64s(r.ConnectionID), + Handle: r.Handle, + Type: r.Type, Deleted: filter.State(r.Deleted), } @@ -243,8 +243,8 @@ func (ctrl DalConnection) filterConnections(baseConnections types.DalConnectionS for _, conn := range baseConnections { include := true - if len(f.ConnectionID) > 0 { - include = include && ctrl.inIDSet(f.ConnectionID, conn.ID) + if len(f.DalConnectionID) > 0 { + include = include && ctrl.inIDSet(f.DalConnectionID, conn.ID) } if f.Handle != "" { diff --git a/server/system/rest/data_privacy.go b/server/system/rest/data_privacy.go index e86ea7529..c5bb91733 100644 --- 
a/server/system/rest/data_privacy.go +++ b/server/system/rest/data_privacy.go @@ -39,9 +39,9 @@ func (ctrl DataPrivacy) ConnectionList(ctx context.Context, r *request.DataPriva set types.PrivacyDalConnectionSet f = types.DalConnectionFilter{ - ConnectionID: payload.ParseUint64s(r.ConnectionID), - Handle: r.Handle, - Type: r.Type, + DalConnectionID: payload.ParseUint64s(r.ConnectionID), + Handle: r.Handle, + Type: r.Type, Deleted: r.Deleted, } diff --git a/server/system/rest/sensitivity_level.go b/server/system/rest/sensitivity_level.go index 066019210..c35eeccdf 100644 --- a/server/system/rest/sensitivity_level.go +++ b/server/system/rest/sensitivity_level.go @@ -50,7 +50,7 @@ func (ctrl SensitivityLevel) List(ctx context.Context, r *request.DalSensitivity set types.DalSensitivityLevelSet f = types.DalSensitivityLevelFilter{ - SensitivityLevelID: payload.ParseUint64s(r.SensitivityLevelID), + DalSensitivityLevelID: payload.ParseUint64s(r.SensitivityLevelID), Deleted: filter.State(r.Deleted), } diff --git a/server/system/role.cue b/server/system/role.cue index f615ea89e..7284ed22b 100644 --- a/server/system/role.cue +++ b/server/system/role.cue @@ -47,6 +47,15 @@ role: { byNilState: ["deleted", "archived"] } + envoy: { + yaml: { + supportMappedInput: true + mappedField: "Handle" + identKeyAlias: ["roles"] + } + store: {} + } + rbac: { operations: { read: description: "Read role" diff --git a/server/system/role_member.cue b/server/system/role_member.cue index c63596013..a1feca4e9 100644 --- a/server/system/role_member.cue +++ b/server/system/role_member.cue @@ -38,6 +38,10 @@ role_member: { byValue: [ "user_id", "role_id"] } + envoy: { + omit: true + } + store: { api: { lookups: [] diff --git a/server/system/settings.cue b/server/system/settings.cue index eab2b2bce..962f28683 100644 --- a/server/system/settings.cue +++ b/server/system/settings.cue @@ -58,6 +58,10 @@ settings: { byValue: [ "owned_by" ] } + envoy: { + omit: true + } + store: { api: { lookups: [ diff 
--git a/server/system/template.cue b/server/system/template.cue index 5c5420ca2..694ede4d8 100644 --- a/server/system/template.cue +++ b/server/system/template.cue @@ -77,6 +77,15 @@ template: { byNilState: ["deleted"] } + envoy: { + yaml: { + supportMappedInput: true + mappedField: "Handle" + identKeyAlias: ["templates"] + } + store: {} + } + rbac: { operations: { read: description: "Read template" diff --git a/server/system/types/apigw_filter.go b/server/system/types/apigw_filter.go index 9c4bdcd54..7e717d768 100644 --- a/server/system/types/apigw_filter.go +++ b/server/system/types/apigw_filter.go @@ -31,7 +31,8 @@ type ( } ApigwFilterFilter struct { - RouteID uint64 `json:"routeID,string"` + ApigwFilterID []uint64 `json:"apigwFilterID"` + RouteID uint64 `json:"routeID,string"` Deleted filter.State `json:"deleted"` Disabled filter.State `json:"disabled"` diff --git a/server/system/types/apigw_route.go b/server/system/types/apigw_route.go index ec38497fc..627e04fe7 100644 --- a/server/system/types/apigw_route.go +++ b/server/system/types/apigw_route.go @@ -34,9 +34,10 @@ type ( } ApigwRouteFilter struct { - Route string `json:"route"` - Endpoint string `json:"endpoint"` - Method string `json:"method"` + ApigwRouteID []uint64 `json:"apigwRouteID"` + Route string `json:"route"` + Endpoint string `json:"endpoint"` + Method string `json:"method"` Deleted filter.State `json:"deleted"` Disabled filter.State `json:"disabled"` diff --git a/server/system/types/applications.go b/server/system/types/applications.go index a5c012a7c..25f5ed4de 100644 --- a/server/system/types/applications.go +++ b/server/system/types/applications.go @@ -3,9 +3,10 @@ package types import ( "database/sql/driver" "encoding/json" - "github.com/cortezaproject/corteza/server/pkg/sql" "time" + "github.com/cortezaproject/corteza/server/pkg/sql" + "github.com/cortezaproject/corteza/server/pkg/filter" ) @@ -42,8 +43,9 @@ type ( } ApplicationFilter struct { - Name string `json:"name"` - Query string 
`json:"query"` + ApplicationID []uint64 `json:"applicationID"` + Name string `json:"name"` + Query string `json:"query"` LabeledIDs []uint64 `json:"-"` Labels map[string]string `json:"labels,omitempty"` diff --git a/server/system/types/auth_client.go b/server/system/types/auth_client.go index 7f3eece43..fca79b39b 100644 --- a/server/system/types/auth_client.go +++ b/server/system/types/auth_client.go @@ -4,9 +4,10 @@ import ( "database/sql/driver" "encoding/json" "fmt" - "github.com/cortezaproject/corteza/server/pkg/sql" "time" + "github.com/cortezaproject/corteza/server/pkg/sql" + "github.com/cortezaproject/corteza/server/pkg/filter" ) @@ -84,7 +85,7 @@ type ( } AuthClientFilter struct { - ClientID []uint64 `json:"authClientID"` + AuthClientID []uint64 `json:"authClientID"` Handle string `json:"handle"` diff --git a/server/system/types/dal_connection.go b/server/system/types/dal_connection.go index df81e46e8..cdf9296db 100644 --- a/server/system/types/dal_connection.go +++ b/server/system/types/dal_connection.go @@ -97,9 +97,9 @@ type ( // ........................................................................ 
DalConnectionFilter struct { - ConnectionID []uint64 `json:"connectionID,string"` - Handle string `json:"handle"` - Type string `json:"type"` + DalConnectionID []uint64 `json:"connectionID,string"` + Handle string `json:"handle"` + Type string `json:"type"` Deleted filter.State `json:"deleted"` diff --git a/server/system/types/dal_sensitivity_level.go b/server/system/types/dal_sensitivity_level.go index 1117a3ccb..7af425a63 100644 --- a/server/system/types/dal_sensitivity_level.go +++ b/server/system/types/dal_sensitivity_level.go @@ -3,9 +3,10 @@ package types import ( "database/sql/driver" "encoding/json" - "github.com/cortezaproject/corteza/server/pkg/sql" "time" + "github.com/cortezaproject/corteza/server/pkg/sql" + "github.com/cortezaproject/corteza/server/pkg/filter" ) @@ -33,7 +34,8 @@ type ( } DalSensitivityLevelFilter struct { - SensitivityLevelID []uint64 `json:"sensitivityLevelID,string"` + DalSensitivityLevelID []uint64 `json:"sensitivityLevelID,string"` + Handle string `json:"handle"` Deleted filter.State `json:"deleted"` diff --git a/server/system/types/getters_setters.gen.go b/server/system/types/getters_setters.gen.go index dee462e25..83c6446b9 100644 --- a/server/system/types/getters_setters.gen.go +++ b/server/system/types/getters_setters.gen.go @@ -234,6 +234,90 @@ func (r *ApigwFilter) SetValue(name string, pos uint, value any) (err error) { return nil } +func (r AuthClient) GetID() uint64 { return r.ID } + +func (r *AuthClient) GetValue(name string, pos uint) (any, error) { + switch name { + case "createdAt", "CreatedAt": + return r.CreatedAt, nil + case "createdBy", "CreatedBy": + return r.CreatedBy, nil + case "deletedAt", "DeletedAt": + return r.DeletedAt, nil + case "deletedBy", "DeletedBy": + return r.DeletedBy, nil + case "enabled", "Enabled": + return r.Enabled, nil + case "expiresAt", "ExpiresAt": + return r.ExpiresAt, nil + case "handle", "Handle": + return r.Handle, nil + case "id", "ID": + return r.ID, nil + case "ownedBy", 
"OwnedBy": + return r.OwnedBy, nil + case "redirectURI", "RedirectURI": + return r.RedirectURI, nil + case "scope", "Scope": + return r.Scope, nil + case "secret", "Secret": + return r.Secret, nil + case "trusted", "Trusted": + return r.Trusted, nil + case "updatedAt", "UpdatedAt": + return r.UpdatedAt, nil + case "updatedBy", "UpdatedBy": + return r.UpdatedBy, nil + case "validFrom", "ValidFrom": + return r.ValidFrom, nil + case "validGrant", "ValidGrant": + return r.ValidGrant, nil + + } + return nil, nil +} + +func (r *AuthClient) SetValue(name string, pos uint, value any) (err error) { + switch name { + case "createdAt", "CreatedAt": + return cast2.Time(value, &r.CreatedAt) + case "createdBy", "CreatedBy": + return cast2.Uint64(value, &r.CreatedBy) + case "deletedAt", "DeletedAt": + return cast2.TimePtr(value, &r.DeletedAt) + case "deletedBy", "DeletedBy": + return cast2.Uint64(value, &r.DeletedBy) + case "enabled", "Enabled": + return cast2.Bool(value, &r.Enabled) + case "expiresAt", "ExpiresAt": + return cast2.TimePtr(value, &r.ExpiresAt) + case "handle", "Handle": + return cast2.String(value, &r.Handle) + case "id", "ID": + return cast2.Uint64(value, &r.ID) + case "ownedBy", "OwnedBy": + return cast2.Uint64(value, &r.OwnedBy) + case "redirectURI", "RedirectURI": + return cast2.String(value, &r.RedirectURI) + case "scope", "Scope": + return cast2.String(value, &r.Scope) + case "secret", "Secret": + return cast2.String(value, &r.Secret) + case "trusted", "Trusted": + return cast2.Bool(value, &r.Trusted) + case "updatedAt", "UpdatedAt": + return cast2.TimePtr(value, &r.UpdatedAt) + case "updatedBy", "UpdatedBy": + return cast2.Uint64(value, &r.UpdatedBy) + case "validFrom", "ValidFrom": + return cast2.TimePtr(value, &r.ValidFrom) + case "validGrant", "ValidGrant": + return cast2.String(value, &r.ValidGrant) + + } + return nil +} + func (r DataPrivacyRequestComment) GetID() uint64 { return r.ID } func (r *DataPrivacyRequestComment) GetValue(name string, pos 
uint) (any, error) { diff --git a/server/system/types/queue.go b/server/system/types/queue.go index 7796a8bda..b1693c2a6 100644 --- a/server/system/types/queue.go +++ b/server/system/types/queue.go @@ -3,9 +3,10 @@ package types import ( "database/sql/driver" "encoding/json" - "github.com/cortezaproject/corteza/server/pkg/sql" "time" + "github.com/cortezaproject/corteza/server/pkg/sql" + "github.com/cortezaproject/corteza/server/pkg/filter" "github.com/spf13/cast" ) @@ -26,6 +27,7 @@ type ( } QueueFilter struct { + QueueID []uint64 `json:"queueID"` Query string `json:"query"` Deleted filter.State `json:"deleted"` diff --git a/server/system/types/user.go b/server/system/types/user.go index ba99a2277..71349fadb 100644 --- a/server/system/types/user.go +++ b/server/system/types/user.go @@ -3,10 +3,10 @@ package types import ( "database/sql/driver" "encoding/json" - "fmt" - "github.com/cortezaproject/corteza/server/pkg/sql" "time" + "github.com/cortezaproject/corteza/server/pkg/sql" + "github.com/cortezaproject/corteza/server/pkg/filter" ) @@ -117,9 +117,9 @@ const ( SystemUser UserKind = "sys" ) -func (u User) String() string { - return fmt.Sprintf("%d", u.ID) -} +// func (u User) String() string { +// return fmt.Sprintf("%d", u.ID) +// } func (u *User) Valid() bool { return u.ID > 0 && u.SuspendedAt == nil && u.DeletedAt == nil diff --git a/server/system/user.cue b/server/system/user.cue index 52d51cf82..a751e5a67 100644 --- a/server/system/user.cue +++ b/server/system/user.cue @@ -23,7 +23,6 @@ user: { unique: true, ignoreCase: true dal: {} - } name: { sortable: true @@ -86,6 +85,15 @@ user: { byNilState: ["deleted", "suspended"] } + envoy: { + yaml: { + supportMappedInput: true + mappedField: "Handle" + identKeyAlias: ["users", "usr"] + } + store: {} + } + rbac: { operations: { "read": description: "Read user"