Compare commits

...

11 Commits

Author SHA1 Message Date
allanice001
4d37a6363f feat: add docker_image and docker_tag to cluster api
Signed-off-by: allanice001 <allanice001@gmail.com>
2025-12-08 17:04:10 +00:00
allanice001
1dbdd04808 Merge remote-tracking branch 'origin/main' 2025-12-08 16:19:03 +00:00
allanice001
45b55015ac fix: ensure jobs are running
Signed-off-by: allanice001 <allanice001@gmail.com>
2025-12-08 16:18:54 +00:00
public-glueops-renovatebot[bot]
6b191089a5 chore: lock file maintenance (#418)
Co-authored-by: public-glueops-renovatebot[bot] <186083205+public-glueops-renovatebot[bot]@users.noreply.github.com>
2025-12-08 14:42:04 +00:00
public-glueops-renovatebot[bot]
d2e6ff9812 feat: update lucide-react to 0.556.0 #minor (#408)
Co-authored-by: public-glueops-renovatebot[bot] <186083205+public-glueops-renovatebot[bot]@users.noreply.github.com>
2025-12-08 13:46:35 +00:00
public-glueops-renovatebot[bot]
98a6cf7e51 feat: update golang.org/x/oauth2 to v0.34.0 #minor (#417)
Co-authored-by: public-glueops-renovatebot[bot] <186083205+public-glueops-renovatebot[bot]@users.noreply.github.com>
2025-12-08 12:31:38 +00:00
public-glueops-renovatebot[bot]
fb4af74e3c chore: lock file maintenance (#414)
Co-authored-by: public-glueops-renovatebot[bot] <186083205+public-glueops-renovatebot[bot]@users.noreply.github.com>
2025-12-07 14:38:05 +00:00
public-glueops-renovatebot[bot]
1021e06655 chore: lock file maintenance (#413)
Co-authored-by: public-glueops-renovatebot[bot] <186083205+public-glueops-renovatebot[bot]@users.noreply.github.com>
2025-12-06 18:42:45 +00:00
public-glueops-renovatebot[bot]
c6be7bf8eb chore: lock file maintenance (#411)
Co-authored-by: public-glueops-renovatebot[bot] <186083205+public-glueops-renovatebot[bot]@users.noreply.github.com>
2025-12-06 14:37:41 +00:00
public-glueops-renovatebot[bot]
1429c40b2b chore: lock file maintenance (#410)
Co-authored-by: public-glueops-renovatebot[bot] <186083205+public-glueops-renovatebot[bot]@users.noreply.github.com>
2025-12-05 16:43:39 +00:00
public-glueops-renovatebot[bot]
73c4904a42 chore: lock file maintenance (#409)
Co-authored-by: public-glueops-renovatebot[bot] <186083205+public-glueops-renovatebot[bot]@users.noreply.github.com>
2025-12-05 13:54:39 +00:00
110 changed files with 1817 additions and 459 deletions

View File

@@ -115,6 +115,47 @@ var serveCmd = &cobra.Command{
if err != nil {
log.Printf("failed to enqueue bootstrap_bastion: %v", err)
}
_, err = jobs.Enqueue(
context.Background(),
uuid.NewString(),
"prepare_cluster",
bg.ClusterPrepareArgs{IntervalS: 120},
archer.WithMaxRetries(3),
archer.WithScheduleTime(time.Now().Add(60*time.Second)),
)
if err != nil {
log.Printf("failed to enqueue prepare_cluster: %v", err)
}
_, err = jobs.Enqueue(
context.Background(),
uuid.NewString(),
"cluster_setup",
bg.ClusterSetupArgs{
IntervalS: 120,
},
archer.WithMaxRetries(3),
archer.WithScheduleTime(time.Now().Add(60*time.Second)),
)
if err != nil {
log.Printf("failed to enqueue cluster setup: %v", err)
}
_, err = jobs.Enqueue(
context.Background(),
uuid.NewString(),
"cluster_bootstrap",
bg.ClusterBootstrapArgs{
IntervalS: 120,
},
archer.WithMaxRetries(3),
archer.WithScheduleTime(time.Now().Add(60*time.Second)),
)
if err != nil {
log.Printf("failed to enqueue cluster bootstrap: %v", err)
}
}
_ = auth.Refresh(rt.DB, rt.Cfg.JWTPrivateEncKey)

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -96,6 +96,10 @@ components:
$ref: '#/components/schemas/dto.RecordSetResponse'
created_at:
type: string
docker_image:
type: string
docker_tag:
type: string
glueops_load_balancer:
$ref: '#/components/schemas/dto.LoadBalancerResponse'
id:
@@ -129,6 +133,10 @@ components:
properties:
cluster_provider:
type: string
docker_image:
type: string
docker_tag:
type: string
name:
type: string
region:
@@ -713,6 +721,10 @@ components:
properties:
cluster_provider:
type: string
docker_image:
type: string
docker_tag:
type: string
name:
type: string
region:

2
go.mod
View File

@@ -26,7 +26,7 @@ require (
github.com/spf13/viper v1.21.0
github.com/swaggo/swag/v2 v2.0.0-rc4
golang.org/x/crypto v0.45.0
golang.org/x/oauth2 v0.33.0
golang.org/x/oauth2 v0.34.0
gopkg.in/yaml.v3 v3.0.1
gorm.io/datatypes v1.2.7
gorm.io/driver/postgres v1.6.0

2
go.sum
View File

@@ -306,6 +306,8 @@ golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

View File

@@ -107,6 +107,28 @@ func NewJobs(gdb *gorm.DB, dbUrl string) (*Jobs, error) {
archer.WithInstances(1),
archer.WithTimeout(2*time.Minute),
)
c.Register(
"prepare_cluster",
ClusterPrepareWorker(gdb, jobs),
archer.WithInstances(1),
archer.WithTimeout(2*time.Minute),
)
c.Register(
"cluster_setup",
ClusterSetupWorker(gdb, jobs),
archer.WithInstances(1),
archer.WithTimeout(2*time.Minute),
)
c.Register(
"cluster_bootstrap",
ClusterBootstrapWorker(gdb, jobs),
archer.WithInstances(1),
archer.WithTimeout(60*time.Minute),
)
return jobs, nil
}

View File

@@ -0,0 +1,121 @@
package bg
import (
"context"
"fmt"
"time"
"github.com/dyaksa/archer"
"github.com/dyaksa/archer/job"
"github.com/glueops/autoglue/internal/models"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
)
// ClusterBootstrapArgs configures the cluster_bootstrap worker.
type ClusterBootstrapArgs struct {
	// IntervalS is the self-reschedule interval in seconds; values <= 0
	// fall back to the 120s default inside the worker.
	IntervalS int `json:"interval_seconds,omitempty"`
}

// ClusterBootstrapResult summarizes a single reconcile tick of the
// cluster_bootstrap worker.
type ClusterBootstrapResult struct {
	Status    string      `json:"status"`             // "ok" when the tick completed
	Processed int         `json:"processed"`          // clusters examined this tick
	Ready     int         `json:"ready"`              // clusters successfully marked ready
	Failed    int         `json:"failed"`             // clusters that failed bootstrap or status update
	ElapsedMs int         `json:"elapsed_ms"`         // wall-clock duration of the tick
	FailedIDs []uuid.UUID `json:"failed_cluster_ids"` // IDs of the failed clusters
}
// ClusterBootstrapWorker returns a worker that reconciles clusters in the
// provisioning state: for each cluster whose bastion is ready it runs
// `make bootstrap` on the bastion, then marks the cluster ready (or failed).
// The worker re-enqueues itself at the configured interval, so it behaves
// like a periodic reconcile loop.
func ClusterBootstrapWorker(db *gorm.DB, jobs *Jobs) archer.WorkerFn {
	return func(ctx context.Context, j job.Job) (any, error) {
		args := ClusterBootstrapArgs{IntervalS: 120}
		jobID := j.ID
		start := time.Now()
		// Argument parsing is best-effort; keep the default interval on failure.
		_ = j.ParseArguments(&args)
		if args.IntervalS <= 0 {
			args.IntervalS = 120
		}
		var clusters []models.Cluster
		if err := db.
			Preload("BastionServer.SshKey").
			Where("status = ?", clusterStatusProvisioning).
			Find(&clusters).Error; err != nil {
			log.Error().Err(err).Msg("[cluster_bootstrap] query clusters failed")
			return nil, err
		}
		proc, ready, failCount := 0, 0, 0
		var failedIDs []uuid.UUID
		perClusterTimeout := 60 * time.Minute
		for i := range clusters {
			c := &clusters[i]
			proc++
			// FIX: BastionServer is a pointer association (see the nil checks in
			// ClusterPrepareWorker) and is nil for clusters without a bastion;
			// dereferencing it unguarded panics. Guard before touching .ID/.Status.
			if c.BastionServer == nil || c.BastionServer.ID == uuid.Nil || c.BastionServer.Status != "ready" {
				continue
			}
			logger := log.With().
				Str("job", jobID).
				Str("cluster_id", c.ID.String()).
				Str("cluster_name", c.Name).
				Logger()
			logger.Info().Msg("[cluster_bootstrap] running make bootstrap")
			// Bound each cluster's bootstrap independently so one slow cluster
			// cannot consume the entire job's time budget.
			runCtx, cancel := context.WithTimeout(ctx, perClusterTimeout)
			out, err := runMakeOnBastion(runCtx, db, c, "bootstrap")
			cancel()
			if err != nil {
				failCount++
				failedIDs = append(failedIDs, c.ID)
				logger.Error().Err(err).Str("output", out).Msg("[cluster_bootstrap] make bootstrap failed")
				_ = setClusterStatus(db, c.ID, clusterStatusFailed, fmt.Sprintf("make bootstrap: %v", err))
				continue
			}
			// you can choose a different terminal status here if you like
			if err := setClusterStatus(db, c.ID, clusterStatusReady, ""); err != nil {
				failCount++
				failedIDs = append(failedIDs, c.ID)
				logger.Error().Err(err).Msg("[cluster_bootstrap] failed to mark cluster ready")
				continue
			}
			ready++
			logger.Info().Msg("[cluster_bootstrap] cluster marked ready")
		}
		res := ClusterBootstrapResult{
			Status:    "ok",
			Processed: proc,
			Ready:     ready,
			Failed:    failCount,
			ElapsedMs: int(time.Since(start).Milliseconds()),
			FailedIDs: failedIDs,
		}
		log.Info().
			Int("processed", proc).
			Int("ready", ready).
			Int("failed", failCount).
			Msg("[cluster_bootstrap] reconcile tick ok")
		// self-reschedule
		next := time.Now().Add(time.Duration(args.IntervalS) * time.Second)
		_, _ = jobs.Enqueue(
			ctx,
			uuid.NewString(),
			"cluster_bootstrap",
			args,
			archer.WithScheduleTime(next),
			archer.WithMaxRetries(1),
		)
		return res, nil
	}
}

View File

@@ -0,0 +1,120 @@
package bg
import (
"context"
"fmt"
"time"
"github.com/dyaksa/archer"
"github.com/dyaksa/archer/job"
"github.com/glueops/autoglue/internal/models"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
)
// ClusterSetupArgs configures the cluster_setup worker.
type ClusterSetupArgs struct {
	// IntervalS is the self-reschedule interval in seconds; values <= 0
	// fall back to the 120s default inside the worker.
	IntervalS int `json:"interval_seconds,omitempty"`
}

// ClusterSetupResult summarizes a single reconcile tick of the
// cluster_setup worker.
type ClusterSetupResult struct {
	Status        string      `json:"status"`             // "ok" when the tick completed
	Processed     int         `json:"processed"`          // clusters examined this tick
	Provisioning  int         `json:"provisioning"`       // clusters moved to provisioning
	Failed        int         `json:"failed"`             // clusters that failed setup or status update
	ElapsedMs     int         `json:"elapsed_ms"`         // wall-clock duration of the tick
	FailedCluster []uuid.UUID `json:"failed_cluster_ids"` // IDs of the failed clusters
}
// ClusterSetupWorker returns a worker that reconciles clusters in the
// pending state: for each cluster whose bastion is ready it runs
// `make setup` on the bastion, then advances the cluster to provisioning
// (or marks it failed). The worker re-enqueues itself at the configured
// interval, so it behaves like a periodic reconcile loop.
func ClusterSetupWorker(db *gorm.DB, jobs *Jobs) archer.WorkerFn {
	return func(ctx context.Context, j job.Job) (any, error) {
		args := ClusterSetupArgs{IntervalS: 120}
		jobID := j.ID
		start := time.Now()
		// Argument parsing is best-effort; keep the default interval on failure.
		_ = j.ParseArguments(&args)
		if args.IntervalS <= 0 {
			args.IntervalS = 120
		}
		var clusters []models.Cluster
		if err := db.
			Preload("BastionServer.SshKey").
			Where("status = ?", clusterStatusPending).
			Find(&clusters).Error; err != nil {
			log.Error().Err(err).Msg("[cluster_setup] query clusters failed")
			return nil, err
		}
		proc, prov, failCount := 0, 0, 0
		var failedIDs []uuid.UUID
		perClusterTimeout := 30 * time.Minute
		for i := range clusters {
			c := &clusters[i]
			proc++
			// FIX: BastionServer is a pointer association (see the nil checks in
			// ClusterPrepareWorker) and is nil for clusters without a bastion;
			// dereferencing it unguarded panics. Guard before touching .ID/.Status.
			if c.BastionServer == nil || c.BastionServer.ID == uuid.Nil || c.BastionServer.Status != "ready" {
				continue
			}
			logger := log.With().
				Str("job", jobID).
				Str("cluster_id", c.ID.String()).
				Str("cluster_name", c.Name).
				Logger()
			logger.Info().Msg("[cluster_setup] running make setup")
			// Bound each cluster's setup independently so one slow cluster
			// cannot consume the entire job's time budget.
			runCtx, cancel := context.WithTimeout(ctx, perClusterTimeout)
			out, err := runMakeOnBastion(runCtx, db, c, "setup")
			cancel()
			if err != nil {
				failCount++
				failedIDs = append(failedIDs, c.ID)
				logger.Error().Err(err).Str("output", out).Msg("[cluster_setup] make setup failed")
				_ = setClusterStatus(db, c.ID, clusterStatusFailed, fmt.Sprintf("make setup: %v", err))
				continue
			}
			if err := setClusterStatus(db, c.ID, clusterStatusProvisioning, ""); err != nil {
				failCount++
				failedIDs = append(failedIDs, c.ID)
				logger.Error().Err(err).Msg("[cluster_setup] failed to mark cluster provisioning")
				continue
			}
			prov++
			logger.Info().Msg("[cluster_setup] cluster moved to provisioning")
		}
		res := ClusterSetupResult{
			Status:        "ok",
			Processed:     proc,
			Provisioning:  prov,
			Failed:        failCount,
			ElapsedMs:     int(time.Since(start).Milliseconds()),
			FailedCluster: failedIDs,
		}
		log.Info().
			Int("processed", proc).
			Int("provisioning", prov).
			Int("failed", failCount).
			Msg("[cluster_setup] reconcile tick ok")
		// self-reschedule
		next := time.Now().Add(time.Duration(args.IntervalS) * time.Second)
		_, _ = jobs.Enqueue(
			ctx,
			uuid.NewString(),
			"cluster_setup",
			args,
			archer.WithScheduleTime(next),
			archer.WithMaxRetries(1),
		)
		return res, nil
	}
}

View File

@@ -0,0 +1,510 @@
package bg
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net"
"strings"
"time"
"github.com/dyaksa/archer"
"github.com/dyaksa/archer/job"
"github.com/glueops/autoglue/internal/models"
"github.com/glueops/autoglue/internal/utils"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
"golang.org/x/crypto/ssh"
"gorm.io/gorm"
)
// ClusterPrepareArgs configures the prepare_cluster worker.
type ClusterPrepareArgs struct {
	// IntervalS is the self-reschedule interval in seconds; values <= 0
	// fall back to the 120s default inside the worker.
	IntervalS int `json:"interval_seconds,omitempty"`
}

// ClusterPrepareFailure records which step of the prepare pipeline failed
// for a given cluster, with a human-readable reason.
type ClusterPrepareFailure struct {
	ClusterID uuid.UUID `json:"cluster_id"`
	Step      string    `json:"step"`   // e.g. "validate", "build_ssh_assets", "ssh_push"
	Reason    string    `json:"reason"` // underlying error text
}

// ClusterPrepareResult summarizes a single reconcile tick of the
// prepare_cluster worker.
type ClusterPrepareResult struct {
	Status        string                  `json:"status"`             // "ok" when the tick completed
	Processed     int                     `json:"processed"`          // clusters examined this tick
	MarkedPending int                     `json:"marked_pending"`     // clusters advanced to pending
	Failed        int                     `json:"failed"`             // clusters that failed any step
	ElapsedMs     int                     `json:"elapsed_ms"`         // wall-clock duration of the tick
	FailedIDs     []uuid.UUID             `json:"failed_cluster_ids"` // IDs of the failed clusters
	Failures      []ClusterPrepareFailure `json:"failures"`           // per-cluster failure details
}
// Alias the status constants from models to avoid string drift.
// Lifecycle as used by the workers in this package:
// pre_pending -> pending -> provisioning -> ready (or failed at any step).
const (
	clusterStatusPrePending   = models.ClusterStatusPrePending
	clusterStatusPending      = models.ClusterStatusPending
	clusterStatusProvisioning = models.ClusterStatusProvisioning
	clusterStatusReady        = models.ClusterStatusReady
	clusterStatusFailed       = models.ClusterStatusFailed
)
// ClusterPrepareWorker returns a worker that moves clusters from pre_pending
// to pending. For each eligible cluster it (1) validates required relations,
// (2) builds per-server SSH assets (decrypted keys + an ssh-config), (3)
// marshals the cluster as payload.json, and (4) pushes everything to the
// bastion over SSH. The worker re-enqueues itself at the configured
// interval, so it behaves like a periodic reconcile loop.
func ClusterPrepareWorker(db *gorm.DB, jobs *Jobs) archer.WorkerFn {
	return func(ctx context.Context, j job.Job) (any, error) {
		args := ClusterPrepareArgs{IntervalS: 120}
		jobID := j.ID
		start := time.Now()
		// Argument parsing is best-effort; keep the default interval on failure.
		_ = j.ParseArguments(&args)
		if args.IntervalS <= 0 {
			args.IntervalS = 120
		}
		// Load all clusters that are pre_pending; we'll filter for bastion.ready in memory.
		var clusters []models.Cluster
		if err := db.
			Preload("BastionServer.SshKey").
			Preload("CaptainDomain").
			Preload("ControlPlaneRecordSet").
			Preload("NodePools.Servers.SshKey").
			Where("status = ?", clusterStatusPrePending).
			Find(&clusters).Error; err != nil {
			log.Error().Err(err).Msg("[cluster_prepare] query clusters failed")
			return nil, err
		}
		proc, ok, fail := 0, 0, 0
		var failedIDs []uuid.UUID
		var failures []ClusterPrepareFailure
		perClusterTimeout := 8 * time.Minute
		for i := range clusters {
			c := &clusters[i]
			proc++
			// bastion must exist and be ready; skip (not fail) otherwise so the
			// cluster is retried on the next tick once the bastion comes up.
			if c.BastionServer == nil || c.BastionServerID == nil || *c.BastionServerID == uuid.Nil || c.BastionServer.Status != "ready" {
				continue
			}
			clusterLog := log.With().
				Str("job", jobID).
				Str("cluster_id", c.ID.String()).
				Str("cluster_name", c.Name).
				Logger()
			clusterLog.Info().Msg("[cluster_prepare] starting")
			// Step 1: validate required relations before doing any work.
			if err := validateClusterForPrepare(c); err != nil {
				fail++
				failedIDs = append(failedIDs, c.ID)
				failures = append(failures, ClusterPrepareFailure{
					ClusterID: c.ID,
					Step:      "validate",
					Reason:    err.Error(),
				})
				clusterLog.Error().Err(err).Msg("[cluster_prepare] validation failed")
				_ = setClusterStatus(db, c.ID, clusterStatusFailed, err.Error())
				continue
			}
			// Step 2: decrypt SSH keys and render the per-cluster ssh-config.
			allServers := flattenClusterServers(c)
			keyPayloads, sshConfig, err := buildSSHAssetsForCluster(db, c, allServers)
			if err != nil {
				fail++
				failedIDs = append(failedIDs, c.ID)
				failures = append(failures, ClusterPrepareFailure{
					ClusterID: c.ID,
					Step:      "build_ssh_assets",
					Reason:    err.Error(),
				})
				clusterLog.Error().Err(err).Msg("[cluster_prepare] build ssh assets failed")
				_ = setClusterStatus(db, c.ID, clusterStatusFailed, err.Error())
				continue
			}
			// Step 3: serialize the full cluster record as the bastion's payload.json.
			payloadJSON, err := json.MarshalIndent(c, "", "  ")
			if err != nil {
				fail++
				failedIDs = append(failedIDs, c.ID)
				failures = append(failures, ClusterPrepareFailure{
					ClusterID: c.ID,
					Step:      "marshal_payload",
					Reason:    err.Error(),
				})
				clusterLog.Error().Err(err).Msg("[cluster_prepare] json marshal failed")
				_ = setClusterStatus(db, c.ID, clusterStatusFailed, err.Error())
				continue
			}
			// Step 4: push assets over SSH, bounded per cluster so one slow
			// bastion cannot consume the whole job's time budget.
			runCtx, cancel := context.WithTimeout(ctx, perClusterTimeout)
			err = pushAssetsToBastion(runCtx, db, c, sshConfig, keyPayloads, payloadJSON)
			cancel()
			if err != nil {
				fail++
				failedIDs = append(failedIDs, c.ID)
				failures = append(failures, ClusterPrepareFailure{
					ClusterID: c.ID,
					Step:      "ssh_push",
					Reason:    err.Error(),
				})
				clusterLog.Error().Err(err).Msg("[cluster_prepare] failed to push assets to bastion")
				_ = setClusterStatus(db, c.ID, clusterStatusFailed, err.Error())
				continue
			}
			// Step 5: advance the cluster to pending for the setup worker.
			if err := setClusterStatus(db, c.ID, clusterStatusPending, ""); err != nil {
				fail++
				failedIDs = append(failedIDs, c.ID)
				failures = append(failures, ClusterPrepareFailure{
					ClusterID: c.ID,
					Step:      "set_pending",
					Reason:    err.Error(),
				})
				clusterLog.Error().Err(err).Msg("[cluster_prepare] failed to mark cluster pending")
				continue
			}
			ok++
			clusterLog.Info().Msg("[cluster_prepare] cluster marked pending")
		}
		res := ClusterPrepareResult{
			Status:        "ok",
			Processed:     proc,
			MarkedPending: ok,
			Failed:        fail,
			ElapsedMs:     int(time.Since(start).Milliseconds()),
			FailedIDs:     failedIDs,
			Failures:      failures,
		}
		log.Info().
			Int("processed", proc).
			Int("pending", ok).
			Int("failed", fail).
			Msg("[cluster_prepare] reconcile tick ok")
		// self-reschedule for the next tick
		next := time.Now().Add(time.Duration(args.IntervalS) * time.Second)
		_, _ = jobs.Enqueue(
			ctx,
			uuid.NewString(),
			"prepare_cluster",
			args,
			archer.WithScheduleTime(next),
			archer.WithMaxRetries(1),
		)
		return res, nil
	}
}
// ---------- helpers ----------
// validateClusterForPrepare checks that a cluster has everything the prepare
// step needs: an attached, ready bastion; a captain domain; a control-plane
// record set; and at least one node pool containing at least one server.
// It returns nil when the cluster is eligible, or a descriptive error.
func validateClusterForPrepare(c *models.Cluster) error {
	switch {
	case c.BastionServer == nil || c.BastionServerID == nil || *c.BastionServerID == uuid.Nil:
		return fmt.Errorf("missing bastion server")
	case c.BastionServer.Status != "ready":
		return fmt.Errorf("bastion server not ready (status=%s)", c.BastionServer.Status)
	case c.CaptainDomainID == nil || *c.CaptainDomainID == uuid.Nil:
		// CaptainDomain is a value type; presence is signalled via its *ID.
		return fmt.Errorf("missing captain domain for cluster")
	case c.ControlPlaneRecordSetID == nil || *c.ControlPlaneRecordSetID == uuid.Nil || c.ControlPlaneRecordSet == nil:
		// ControlPlaneRecordSet is a pointer; require both the *ID and the struct.
		return fmt.Errorf("missing control_plane_record_set for cluster")
	case len(c.NodePools) == 0:
		return fmt.Errorf("cluster has no node pools")
	}
	// At least one pool must actually have servers attached.
	for i := range c.NodePools {
		if len(c.NodePools[i].Servers) > 0 {
			return nil
		}
	}
	return fmt.Errorf("cluster has no servers attached to node pools")
}
// flattenClusterServers collects pointers to every server across all of the
// cluster's node pools into one flat slice, preserving pool/server order.
func flattenClusterServers(c *models.Cluster) []*models.Server {
	var servers []*models.Server
	for pi := range c.NodePools {
		pool := &c.NodePools[pi]
		// Take addresses of the slice elements themselves so callers can
		// mutate the cluster's servers through the returned pointers.
		for si := range pool.Servers {
			servers = append(servers, &pool.Servers[si])
		}
	}
	return servers
}
// keyPayload is a decrypted SSH private key staged for upload to the bastion.
type keyPayload struct {
	FileName      string // target file name under ~/.ssh/autoglue/keys/ (e.g. "<key-id>.pem")
	PrivateKeyB64 string // base64-encoded private key, safe to embed in a shell heredoc
}
// build ssh-config for all servers + decrypt keys.
// ssh-config is intended to live on the bastion and connect via *private* IPs.
//
// Returns a map of ssh-key ID -> decrypted key payload (de-duplicated, since
// many servers can share one key) and the rendered ssh-config text. Any
// missing private IP, missing key relation, or decryption failure aborts the
// whole build with an error.
func buildSSHAssetsForCluster(db *gorm.DB, c *models.Cluster, servers []*models.Server) (map[uuid.UUID]keyPayload, string, error) {
	var sb strings.Builder
	keys := make(map[uuid.UUID]keyPayload)
	for _, s := range servers {
		// Defensive checks
		if strings.TrimSpace(s.PrivateIPAddress) == "" {
			return nil, "", fmt.Errorf("server %s missing private ip", s.ID)
		}
		if s.SshKeyID == uuid.Nil {
			return nil, "", fmt.Errorf("server %s missing ssh key relation", s.ID)
		}
		// de-dupe keys: many servers may share the same ssh key
		if _, ok := keys[s.SshKeyID]; !ok {
			// Keys are stored encrypted per organization; decrypt before upload.
			priv, err := utils.DecryptForOrg(
				s.OrganizationID,
				s.SshKey.EncryptedPrivateKey,
				s.SshKey.PrivateIV,
				s.SshKey.PrivateTag,
				db,
			)
			if err != nil {
				return nil, "", fmt.Errorf("decrypt key for server %s: %w", s.ID, err)
			}
			fname := fmt.Sprintf("%s.pem", s.SshKeyID.String())
			// Base64 so the key survives embedding in a shell heredoc verbatim.
			keys[s.SshKeyID] = keyPayload{
				FileName:      fname,
				PrivateKeyB64: base64.StdEncoding.EncodeToString([]byte(priv)),
			}
		}
		// ssh config entry per server
		keyFile := keys[s.SshKeyID].FileName
		hostAlias := s.Hostname
		if hostAlias == "" {
			// Fall back to the server ID when no hostname is set.
			hostAlias = s.ID.String()
		}
		sb.WriteString(fmt.Sprintf("Host %s\n", hostAlias))
		sb.WriteString(fmt.Sprintf(" HostName %s\n", s.PrivateIPAddress))
		sb.WriteString(fmt.Sprintf(" User %s\n", s.SSHUser))
		sb.WriteString(fmt.Sprintf(" IdentityFile ~/.ssh/autoglue/keys/%s\n", keyFile))
		sb.WriteString(" IdentitiesOnly yes\n")
		sb.WriteString(" StrictHostKeyChecking accept-new\n\n")
	}
	return keys, sb.String(), nil
}
// pushAssetsToBastion opens an SSH connection to the cluster's bastion
// (public IP, port 22) and uploads the cluster's prepare artifacts by piping
// a generated shell script into `bash -s`: the per-cluster ssh-config, every
// decrypted private key, and payload.json. Returns an error on any SSH,
// decryption, or remote-script failure. Honors ctx for dial cancellation;
// the SSH handshake itself uses the client config's 30s timeout.
func pushAssetsToBastion(
	ctx context.Context,
	db *gorm.DB,
	c *models.Cluster,
	sshConfig string,
	keyPayloads map[uuid.UUID]keyPayload,
	payloadJSON []byte,
) error {
	bastion := c.BastionServer
	if bastion == nil {
		return fmt.Errorf("bastion server is nil")
	}
	if bastion.PublicIPAddress == nil || strings.TrimSpace(*bastion.PublicIPAddress) == "" {
		return fmt.Errorf("bastion server missing public ip")
	}
	// The bastion's own key is stored encrypted per organization.
	privKey, err := utils.DecryptForOrg(
		bastion.OrganizationID,
		bastion.SshKey.EncryptedPrivateKey,
		bastion.SshKey.PrivateIV,
		bastion.SshKey.PrivateTag,
		db,
	)
	if err != nil {
		return fmt.Errorf("decrypt bastion key: %w", err)
	}
	signer, err := ssh.ParsePrivateKey([]byte(privKey))
	if err != nil {
		return fmt.Errorf("parse bastion private key: %w", err)
	}
	// Host-key verification is backed by the database (see makeDBHostKeyCallback).
	hkcb := makeDBHostKeyCallback(db, bastion)
	config := &ssh.ClientConfig{
		User:            bastion.SSHUser,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: hkcb,
		Timeout:         30 * time.Second,
	}
	host := net.JoinHostPort(*bastion.PublicIPAddress, "22")
	// Dial manually (rather than ssh.Dial) so the TCP connect respects ctx.
	dialer := &net.Dialer{}
	conn, err := dialer.DialContext(ctx, "tcp", host)
	if err != nil {
		return fmt.Errorf("dial bastion: %w", err)
	}
	defer conn.Close()
	cconn, chans, reqs, err := ssh.NewClientConn(conn, host, config)
	if err != nil {
		return fmt.Errorf("ssh handshake bastion: %w", err)
	}
	client := ssh.NewClient(cconn, chans, reqs)
	defer client.Close()
	sess, err := client.NewSession()
	if err != nil {
		return fmt.Errorf("ssh session: %w", err)
	}
	defer sess.Close()
	// build one shot script to:
	//  - mkdir ~/.ssh/autoglue/keys
	//  - write cluster-specific ssh-config
	//  - write all private keys
	//  - write payload.json
	clusterDir := fmt.Sprintf("$HOME/autoglue/clusters/%s", c.ID.String())
	configPath := fmt.Sprintf("$HOME/.ssh/autoglue/cluster-%s.config", c.ID.String())
	var script bytes.Buffer
	script.WriteString("set -euo pipefail\n")
	script.WriteString("mkdir -p \"$HOME/.ssh/autoglue/keys\"\n")
	script.WriteString("mkdir -p " + clusterDir + "\n")
	script.WriteString("chmod 700 \"$HOME/.ssh\" || true\n")
	// ssh-config (quoted heredoc so the content is written verbatim)
	script.WriteString("cat > " + configPath + " <<'EOF_CFG'\n")
	script.WriteString(sshConfig)
	script.WriteString("EOF_CFG\n")
	script.WriteString("chmod 600 " + configPath + "\n")
	// keys: each is base64-decoded on the bastion; a unique per-key heredoc
	// tag (KEY_<uuid>) avoids delimiter collisions with the key material.
	for id, kp := range keyPayloads {
		tag := "KEY_" + id.String()
		target := fmt.Sprintf("$HOME/.ssh/autoglue/keys/%s", kp.FileName)
		script.WriteString("cat <<'" + tag + "' | base64 -d > " + target + "\n")
		script.WriteString(kp.PrivateKeyB64 + "\n")
		script.WriteString(tag + "\n")
		script.WriteString("chmod 600 " + target + "\n")
	}
	// payload.json
	payloadPath := clusterDir + "/payload.json"
	script.WriteString("cat > " + payloadPath + " <<'EOF_PAYLOAD'\n")
	script.Write(payloadJSON)
	script.WriteString("\nEOF_PAYLOAD\n")
	script.WriteString("chmod 600 " + payloadPath + "\n")
	// If you later want to always include cluster configs automatically, you can
	// optionally manage ~/.ssh/config here (kept simple for now).
	sess.Stdin = strings.NewReader(script.String())
	out, runErr := sess.CombinedOutput("bash -s")
	if runErr != nil {
		return wrapSSHError(runErr, string(out))
	}
	return nil
}
// setClusterStatus persists a new status for the cluster identified by id,
// bumping updated_at, and records lastError in last_error when non-empty.
func setClusterStatus(db *gorm.DB, id uuid.UUID, status, lastError string) error {
	fields := map[string]any{"status": status, "updated_at": time.Now()}
	if lastError != "" {
		fields["last_error"] = lastError
	}
	result := db.Model(&models.Cluster{}).Where("id = ?", id).Updates(fields)
	return result.Error
}
// runMakeOnBastion runs `make <target>` from the cluster's directory on the bastion.
//
// It connects to the bastion's public IP on port 22 using the bastion's
// decrypted SSH key, executes `cd $HOME/autoglue/clusters/<cluster-id> &&
// make <target>`, and returns the combined stdout/stderr. On failure the
// output is returned alongside the wrapped error so callers can log it.
// Honors ctx for dial cancellation; the SSH handshake uses the client
// config's 30s timeout. NOTE(review): the connection setup duplicates
// pushAssetsToBastion — a shared dial helper would reduce drift.
func runMakeOnBastion(
	ctx context.Context,
	db *gorm.DB,
	c *models.Cluster,
	target string,
) (string, error) {
	bastion := c.BastionServer
	if bastion == nil {
		return "", fmt.Errorf("bastion server is nil")
	}
	if bastion.PublicIPAddress == nil || strings.TrimSpace(*bastion.PublicIPAddress) == "" {
		return "", fmt.Errorf("bastion server missing public ip")
	}
	// The bastion's key is stored encrypted per organization.
	privKey, err := utils.DecryptForOrg(
		bastion.OrganizationID,
		bastion.SshKey.EncryptedPrivateKey,
		bastion.SshKey.PrivateIV,
		bastion.SshKey.PrivateTag,
		db,
	)
	if err != nil {
		return "", fmt.Errorf("decrypt bastion key: %w", err)
	}
	signer, err := ssh.ParsePrivateKey([]byte(privKey))
	if err != nil {
		return "", fmt.Errorf("parse bastion private key: %w", err)
	}
	// Host-key verification is backed by the database (see makeDBHostKeyCallback).
	hkcb := makeDBHostKeyCallback(db, bastion)
	config := &ssh.ClientConfig{
		User:            bastion.SSHUser,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: hkcb,
		Timeout:         30 * time.Second,
	}
	host := net.JoinHostPort(*bastion.PublicIPAddress, "22")
	// Dial manually (rather than ssh.Dial) so the TCP connect respects ctx.
	dialer := &net.Dialer{}
	conn, err := dialer.DialContext(ctx, "tcp", host)
	if err != nil {
		return "", fmt.Errorf("dial bastion: %w", err)
	}
	defer conn.Close()
	cconn, chans, reqs, err := ssh.NewClientConn(conn, host, config)
	if err != nil {
		return "", fmt.Errorf("ssh handshake bastion: %w", err)
	}
	client := ssh.NewClient(cconn, chans, reqs)
	defer client.Close()
	sess, err := client.NewSession()
	if err != nil {
		return "", fmt.Errorf("ssh session: %w", err)
	}
	defer sess.Close()
	clusterDir := fmt.Sprintf("$HOME/autoglue/clusters/%s", c.ID.String())
	cmd := fmt.Sprintf("cd %s && make %s", clusterDir, target)
	out, runErr := sess.CombinedOutput(cmd)
	if runErr != nil {
		// Return the output even on failure so callers can surface it in logs.
		return string(out), wrapSSHError(runErr, string(out))
	}
	return string(out), nil
}

View File

@@ -189,6 +189,8 @@ func CreateCluster(db *gorm.DB) http.HandlerFunc {
LastError: "",
CertificateKey: certificateKey,
RandomToken: randomToken,
DockerImage: in.DockerImage,
DockerTag: in.DockerTag,
}
if err := db.Create(&c).Error; err != nil {
@@ -262,6 +264,14 @@ func UpdateCluster(db *gorm.DB) http.HandlerFunc {
cluster.Region = *in.Region
}
if in.DockerImage != nil {
cluster.DockerImage = *in.DockerImage
}
if in.DockerTag != nil {
cluster.DockerTag = *in.DockerTag
}
if err := db.Save(&cluster).Error; err != nil {
utils.WriteError(w, http.StatusInternalServerError, "db_error", "db error")
return
@@ -1547,6 +1557,8 @@ func clusterToDTO(c models.Cluster) dto.ClusterResponse {
RandomToken: c.RandomToken,
CertificateKey: c.CertificateKey,
NodePools: nps,
DockerImage: c.DockerImage,
DockerTag: c.DockerTag,
CreatedAt: c.CreatedAt,
UpdatedAt: c.UpdatedAt,
}

View File

@@ -22,6 +22,8 @@ type ClusterResponse struct {
RandomToken string `json:"random_token"`
CertificateKey string `json:"certificate_key"`
NodePools []NodePoolResponse `json:"node_pools,omitempty"`
DockerImage string `json:"docker_image"`
DockerTag string `json:"docker_tag"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
}
@@ -30,12 +32,16 @@ type CreateClusterRequest struct {
Name string `json:"name"`
ClusterProvider string `json:"cluster_provider"`
Region string `json:"region"`
DockerImage string `json:"docker_image"`
DockerTag string `json:"docker_tag"`
}
// UpdateClusterRequest is the partial-update payload for a cluster.
// All fields are optional pointers; nil fields are left unchanged.
type UpdateClusterRequest struct {
	Name            *string `json:"name,omitempty"`
	ClusterProvider *string `json:"cluster_provider,omitempty"`
	Region          *string `json:"region,omitempty"`
	DockerImage     *string `json:"docker_image,omitempty"`
	DockerTag       *string `json:"docker_tag,omitempty"`
}
type AttachCaptainDomainRequest struct {

View File

@@ -40,6 +40,8 @@ type Cluster struct {
EncryptedKubeconfig string `gorm:"type:text" json:"-"`
KubeIV string `json:"-"`
KubeTag string `json:"-"`
DockerImage string `json:"docker_image"`
DockerTag string `json:"docker_tag"`
CreatedAt time.Time `json:"created_at,omitempty" gorm:"type:timestamptz;column:created_at;not null;default:now()"`
UpdatedAt time.Time `json:"updated_at,omitempty" gorm:"type:timestamptz;autoUpdateTime;column:updated_at;not null;default:now()"`
}

View File

@@ -12,6 +12,8 @@
| `control_plane_fqdn` | string |
| `control_plane_record_set` | [DtoRecordSetResponse](DtoRecordSetResponse.md) |
| `created_at` | string |
| `docker_image` | string |
| `docker_tag` | string |
| `glueops_load_balancer` | [DtoLoadBalancerResponse](DtoLoadBalancerResponse.md) |
| `id` | string |
| `last_error` | string |
@@ -37,6 +39,8 @@ const example = {
control_plane_fqdn: null,
control_plane_record_set: null,
created_at: null,
docker_image: null,
docker_tag: null,
glueops_load_balancer: null,
id: null,
last_error: null,

View File

@@ -5,6 +5,8 @@
| Name | Type |
| ------------------ | ------ |
| `cluster_provider` | string |
| `docker_image` | string |
| `docker_tag` | string |
| `name` | string |
| `region` | string |
@@ -16,6 +18,8 @@ import type { DtoCreateClusterRequest } from "@glueops/autoglue-sdk-go";
// TODO: Update the object below with actual values
const example = {
cluster_provider: null,
docker_image: null,
docker_tag: null,
name: null,
region: null,
} satisfies DtoCreateClusterRequest;

View File

@@ -5,6 +5,8 @@
| Name | Type |
| ------------------ | ------ |
| `cluster_provider` | string |
| `docker_image` | string |
| `docker_tag` | string |
| `name` | string |
| `region` | string |
@@ -16,6 +18,8 @@ import type { DtoUpdateClusterRequest } from "@glueops/autoglue-sdk-go";
// TODO: Update the object below with actual values
const example = {
cluster_provider: null,
docker_image: null,
docker_tag: null,
name: null,
region: null,
} satisfies DtoUpdateClusterRequest;

View File

@@ -16,6 +16,6 @@
"prepare": "npm run build"
},
"devDependencies": {
"typescript": "5.9.3"
"typescript": "^4.0 || ^5.0"
}
}

View File

@@ -13,11 +13,18 @@
*/
import * as runtime from "../runtime";
import type {DtoAnnotationResponse, DtoCreateAnnotationRequest, DtoUpdateAnnotationRequest,} from "../models/index";
import type {
DtoAnnotationResponse,
DtoCreateAnnotationRequest,
DtoUpdateAnnotationRequest,
} from "../models/index";
import {
DtoAnnotationResponseFromJSON,
DtoCreateAnnotationRequestToJSON,
DtoUpdateAnnotationRequestToJSON,
DtoAnnotationResponseFromJSON,
DtoAnnotationResponseToJSON,
DtoCreateAnnotationRequestFromJSON,
DtoCreateAnnotationRequestToJSON,
DtoUpdateAnnotationRequestFromJSON,
DtoUpdateAnnotationRequestToJSON,
} from "../models/index";
export interface CreateAnnotationRequest {

View File

@@ -13,8 +13,22 @@
*/
import * as runtime from "../runtime";
import type {DtoEnqueueRequest, DtoJob, DtoPageJob, DtoQueueInfo,} from "../models/index";
import {DtoEnqueueRequestToJSON, DtoJobFromJSON, DtoPageJobFromJSON, DtoQueueInfoFromJSON,} from "../models/index";
import type {
DtoEnqueueRequest,
DtoJob,
DtoPageJob,
DtoQueueInfo,
} from "../models/index";
import {
DtoEnqueueRequestFromJSON,
DtoEnqueueRequestToJSON,
DtoJobFromJSON,
DtoJobToJSON,
DtoPageJobFromJSON,
DtoPageJobToJSON,
DtoQueueInfoFromJSON,
DtoQueueInfoToJSON,
} from "../models/index";
export interface AdminCancelArcherJobRequest {
id: string;

View File

@@ -13,13 +13,24 @@
*/
import * as runtime from "../runtime";
import type {DtoAuthStartResponse, DtoJWKS, DtoLogoutRequest, DtoRefreshRequest, DtoTokenPair,} from "../models/index";
import type {
DtoAuthStartResponse,
DtoJWKS,
DtoLogoutRequest,
DtoRefreshRequest,
DtoTokenPair,
} from "../models/index";
import {
DtoAuthStartResponseFromJSON,
DtoJWKSFromJSON,
DtoLogoutRequestToJSON,
DtoRefreshRequestToJSON,
DtoTokenPairFromJSON,
DtoAuthStartResponseFromJSON,
DtoAuthStartResponseToJSON,
DtoJWKSFromJSON,
DtoJWKSToJSON,
DtoLogoutRequestFromJSON,
DtoLogoutRequestToJSON,
DtoRefreshRequestFromJSON,
DtoRefreshRequestToJSON,
DtoTokenPairFromJSON,
DtoTokenPairToJSON,
} from "../models/index";
export interface AuthCallbackRequest {

View File

@@ -14,26 +14,35 @@
import * as runtime from "../runtime";
import type {
DtoAttachBastionRequest,
DtoAttachCaptainDomainRequest,
DtoAttachLoadBalancerRequest,
DtoAttachNodePoolRequest,
DtoAttachRecordSetRequest,
DtoClusterResponse,
DtoCreateClusterRequest,
DtoSetKubeconfigRequest,
DtoUpdateClusterRequest,
DtoAttachBastionRequest,
DtoAttachCaptainDomainRequest,
DtoAttachLoadBalancerRequest,
DtoAttachNodePoolRequest,
DtoAttachRecordSetRequest,
DtoClusterResponse,
DtoCreateClusterRequest,
DtoSetKubeconfigRequest,
DtoUpdateClusterRequest,
} from "../models/index";
import {
DtoAttachBastionRequestToJSON,
DtoAttachCaptainDomainRequestToJSON,
DtoAttachLoadBalancerRequestToJSON,
DtoAttachNodePoolRequestToJSON,
DtoAttachRecordSetRequestToJSON,
DtoClusterResponseFromJSON,
DtoCreateClusterRequestToJSON,
DtoSetKubeconfigRequestToJSON,
DtoUpdateClusterRequestToJSON,
DtoAttachBastionRequestFromJSON,
DtoAttachBastionRequestToJSON,
DtoAttachCaptainDomainRequestFromJSON,
DtoAttachCaptainDomainRequestToJSON,
DtoAttachLoadBalancerRequestFromJSON,
DtoAttachLoadBalancerRequestToJSON,
DtoAttachNodePoolRequestFromJSON,
DtoAttachNodePoolRequestToJSON,
DtoAttachRecordSetRequestFromJSON,
DtoAttachRecordSetRequestToJSON,
DtoClusterResponseFromJSON,
DtoClusterResponseToJSON,
DtoCreateClusterRequestFromJSON,
DtoCreateClusterRequestToJSON,
DtoSetKubeconfigRequestFromJSON,
DtoSetKubeconfigRequestToJSON,
DtoUpdateClusterRequestFromJSON,
DtoUpdateClusterRequestToJSON,
} from "../models/index";
export interface AttachAppsLoadBalancerRequest {

View File

@@ -13,11 +13,18 @@
*/
import * as runtime from "../runtime";
import type {DtoCreateCredentialRequest, DtoCredentialOut, DtoUpdateCredentialRequest,} from "../models/index";
import type {
DtoCreateCredentialRequest,
DtoCredentialOut,
DtoUpdateCredentialRequest,
} from "../models/index";
import {
DtoCreateCredentialRequestToJSON,
DtoCredentialOutFromJSON,
DtoUpdateCredentialRequestToJSON,
DtoCreateCredentialRequestFromJSON,
DtoCreateCredentialRequestToJSON,
DtoCredentialOutFromJSON,
DtoCredentialOutToJSON,
DtoUpdateCredentialRequestFromJSON,
DtoUpdateCredentialRequestToJSON,
} from "../models/index";
export interface CreateCredentialRequest {

View File

@@ -14,20 +14,26 @@
import * as runtime from "../runtime";
import type {
DtoCreateDomainRequest,
DtoCreateRecordSetRequest,
DtoDomainResponse,
DtoRecordSetResponse,
DtoUpdateDomainRequest,
DtoUpdateRecordSetRequest,
DtoCreateDomainRequest,
DtoCreateRecordSetRequest,
DtoDomainResponse,
DtoRecordSetResponse,
DtoUpdateDomainRequest,
DtoUpdateRecordSetRequest,
} from "../models/index";
import {
DtoCreateDomainRequestToJSON,
DtoCreateRecordSetRequestToJSON,
DtoDomainResponseFromJSON,
DtoRecordSetResponseFromJSON,
DtoUpdateDomainRequestToJSON,
DtoUpdateRecordSetRequestToJSON,
DtoCreateDomainRequestFromJSON,
DtoCreateDomainRequestToJSON,
DtoCreateRecordSetRequestFromJSON,
DtoCreateRecordSetRequestToJSON,
DtoDomainResponseFromJSON,
DtoDomainResponseToJSON,
DtoRecordSetResponseFromJSON,
DtoRecordSetResponseToJSON,
DtoUpdateDomainRequestFromJSON,
DtoUpdateDomainRequestToJSON,
DtoUpdateRecordSetRequestFromJSON,
DtoUpdateRecordSetRequestToJSON,
} from "../models/index";
export interface CreateDomainRequest {

View File

@@ -13,8 +13,11 @@
*/
import * as runtime from "../runtime";
import type {HandlersHealthStatus} from "../models/index";
import {HandlersHealthStatusFromJSON,} from "../models/index";
import type { HandlersHealthStatus } from "../models/index";
import {
HandlersHealthStatusFromJSON,
HandlersHealthStatusToJSON,
} from "../models/index";
/**
*

View File

@@ -13,8 +13,19 @@
*/
import * as runtime from "../runtime";
import type {DtoCreateLabelRequest, DtoLabelResponse, DtoUpdateLabelRequest,} from "../models/index";
import {DtoCreateLabelRequestToJSON, DtoLabelResponseFromJSON, DtoUpdateLabelRequestToJSON,} from "../models/index";
import type {
DtoCreateLabelRequest,
DtoLabelResponse,
DtoUpdateLabelRequest,
} from "../models/index";
import {
DtoCreateLabelRequestFromJSON,
DtoCreateLabelRequestToJSON,
DtoLabelResponseFromJSON,
DtoLabelResponseToJSON,
DtoUpdateLabelRequestFromJSON,
DtoUpdateLabelRequestToJSON,
} from "../models/index";
export interface CreateLabelRequest {
dtoCreateLabelRequest: DtoCreateLabelRequest;

View File

@@ -14,14 +14,17 @@
import * as runtime from "../runtime";
import type {
DtoCreateLoadBalancerRequest,
DtoLoadBalancerResponse,
DtoUpdateLoadBalancerRequest,
DtoCreateLoadBalancerRequest,
DtoLoadBalancerResponse,
DtoUpdateLoadBalancerRequest,
} from "../models/index";
import {
DtoCreateLoadBalancerRequestToJSON,
DtoLoadBalancerResponseFromJSON,
DtoUpdateLoadBalancerRequestToJSON,
DtoCreateLoadBalancerRequestFromJSON,
DtoCreateLoadBalancerRequestToJSON,
DtoLoadBalancerResponseFromJSON,
DtoLoadBalancerResponseToJSON,
DtoUpdateLoadBalancerRequestFromJSON,
DtoUpdateLoadBalancerRequestToJSON,
} from "../models/index";
export interface CreateLoadBalancerRequest {

View File

@@ -13,8 +13,16 @@
*/
import * as runtime from "../runtime";
import type {HandlersCreateUserKeyRequest, HandlersUserAPIKeyOut,} from "../models/index";
import {HandlersCreateUserKeyRequestToJSON, HandlersUserAPIKeyOutFromJSON,} from "../models/index";
import type {
HandlersCreateUserKeyRequest,
HandlersUserAPIKeyOut,
} from "../models/index";
import {
HandlersCreateUserKeyRequestFromJSON,
HandlersCreateUserKeyRequestToJSON,
HandlersUserAPIKeyOutFromJSON,
HandlersUserAPIKeyOutToJSON,
} from "../models/index";
export interface CreateUserAPIKeyRequest {
handlersCreateUserKeyRequest: HandlersCreateUserKeyRequest;

View File

@@ -13,8 +13,19 @@
*/
import * as runtime from "../runtime";
import type {HandlersMeResponse, HandlersUpdateMeRequest, ModelsUser,} from "../models/index";
import {HandlersMeResponseFromJSON, HandlersUpdateMeRequestToJSON, ModelsUserFromJSON,} from "../models/index";
import type {
HandlersMeResponse,
HandlersUpdateMeRequest,
ModelsUser,
} from "../models/index";
import {
HandlersMeResponseFromJSON,
HandlersMeResponseToJSON,
HandlersUpdateMeRequestFromJSON,
HandlersUpdateMeRequestToJSON,
ModelsUserFromJSON,
ModelsUserToJSON,
} from "../models/index";
export interface UpdateMeRequest {
handlersUpdateMeRequest: HandlersUpdateMeRequest;

View File

@@ -13,8 +13,11 @@
*/
import * as runtime from "../runtime";
import type {HandlersVersionResponse} from "../models/index";
import {HandlersVersionResponseFromJSON,} from "../models/index";
import type { HandlersVersionResponse } from "../models/index";
import {
HandlersVersionResponseFromJSON,
HandlersVersionResponseToJSON,
} from "../models/index";
/**
*

View File

@@ -14,30 +14,41 @@
import * as runtime from "../runtime";
import type {
DtoAnnotationResponse,
DtoAttachAnnotationsRequest,
DtoAttachLabelsRequest,
DtoAttachServersRequest,
DtoAttachTaintsRequest,
DtoCreateNodePoolRequest,
DtoLabelResponse,
DtoNodePoolResponse,
DtoServerResponse,
DtoTaintResponse,
DtoUpdateNodePoolRequest,
DtoAnnotationResponse,
DtoAttachAnnotationsRequest,
DtoAttachLabelsRequest,
DtoAttachServersRequest,
DtoAttachTaintsRequest,
DtoCreateNodePoolRequest,
DtoLabelResponse,
DtoNodePoolResponse,
DtoServerResponse,
DtoTaintResponse,
DtoUpdateNodePoolRequest,
} from "../models/index";
import {
DtoAnnotationResponseFromJSON,
DtoAttachAnnotationsRequestToJSON,
DtoAttachLabelsRequestToJSON,
DtoAttachServersRequestToJSON,
DtoAttachTaintsRequestToJSON,
DtoCreateNodePoolRequestToJSON,
DtoLabelResponseFromJSON,
DtoNodePoolResponseFromJSON,
DtoServerResponseFromJSON,
DtoTaintResponseFromJSON,
DtoUpdateNodePoolRequestToJSON,
DtoAnnotationResponseFromJSON,
DtoAnnotationResponseToJSON,
DtoAttachAnnotationsRequestFromJSON,
DtoAttachAnnotationsRequestToJSON,
DtoAttachLabelsRequestFromJSON,
DtoAttachLabelsRequestToJSON,
DtoAttachServersRequestFromJSON,
DtoAttachServersRequestToJSON,
DtoAttachTaintsRequestFromJSON,
DtoAttachTaintsRequestToJSON,
DtoCreateNodePoolRequestFromJSON,
DtoCreateNodePoolRequestToJSON,
DtoLabelResponseFromJSON,
DtoLabelResponseToJSON,
DtoNodePoolResponseFromJSON,
DtoNodePoolResponseToJSON,
DtoServerResponseFromJSON,
DtoServerResponseToJSON,
DtoTaintResponseFromJSON,
DtoTaintResponseToJSON,
DtoUpdateNodePoolRequestFromJSON,
DtoUpdateNodePoolRequestToJSON,
} from "../models/index";
export interface AttachNodePoolAnnotationsRequest {

View File

@@ -14,24 +14,35 @@
import * as runtime from "../runtime";
import type {
HandlersMemberOut,
HandlersMemberUpsertReq,
HandlersOrgCreateReq,
HandlersOrgKeyCreateReq,
HandlersOrgKeyCreateResp,
HandlersOrgUpdateReq,
ModelsAPIKey,
ModelsOrganization,
HandlersMemberOut,
HandlersMemberUpsertReq,
HandlersOrgCreateReq,
HandlersOrgKeyCreateReq,
HandlersOrgKeyCreateResp,
HandlersOrgUpdateReq,
ModelsAPIKey,
ModelsOrganization,
UtilsErrorResponse,
} from "../models/index";
import {
HandlersMemberOutFromJSON,
HandlersMemberUpsertReqToJSON,
HandlersOrgCreateReqToJSON,
HandlersOrgKeyCreateReqToJSON,
HandlersOrgKeyCreateRespFromJSON,
HandlersOrgUpdateReqToJSON,
ModelsAPIKeyFromJSON,
ModelsOrganizationFromJSON,
HandlersMemberOutFromJSON,
HandlersMemberOutToJSON,
HandlersMemberUpsertReqFromJSON,
HandlersMemberUpsertReqToJSON,
HandlersOrgCreateReqFromJSON,
HandlersOrgCreateReqToJSON,
HandlersOrgKeyCreateReqFromJSON,
HandlersOrgKeyCreateReqToJSON,
HandlersOrgKeyCreateRespFromJSON,
HandlersOrgKeyCreateRespToJSON,
HandlersOrgUpdateReqFromJSON,
HandlersOrgUpdateReqToJSON,
ModelsAPIKeyFromJSON,
ModelsAPIKeyToJSON,
ModelsOrganizationFromJSON,
ModelsOrganizationToJSON,
UtilsErrorResponseFromJSON,
UtilsErrorResponseToJSON,
} from "../models/index";
export interface AddOrUpdateMemberRequest {

View File

@@ -13,8 +13,19 @@
*/
import * as runtime from "../runtime";
import type {DtoCreateServerRequest, DtoServerResponse, DtoUpdateServerRequest,} from "../models/index";
import {DtoCreateServerRequestToJSON, DtoServerResponseFromJSON, DtoUpdateServerRequestToJSON,} from "../models/index";
import type {
DtoCreateServerRequest,
DtoServerResponse,
DtoUpdateServerRequest,
} from "../models/index";
import {
DtoCreateServerRequestFromJSON,
DtoCreateServerRequestToJSON,
DtoServerResponseFromJSON,
DtoServerResponseToJSON,
DtoUpdateServerRequestFromJSON,
DtoUpdateServerRequestToJSON,
} from "../models/index";
export interface CreateServerRequest {
dtoCreateServerRequest: DtoCreateServerRequest;

View File

@@ -13,8 +13,19 @@
*/
import * as runtime from "../runtime";
import type {DtoCreateSSHRequest, DtoSshResponse, GetSSHKey200Response,} from "../models/index";
import {DtoCreateSSHRequestToJSON, DtoSshResponseFromJSON, GetSSHKey200ResponseFromJSON,} from "../models/index";
import type {
DtoCreateSSHRequest,
DtoSshResponse,
GetSSHKey200Response,
} from "../models/index";
import {
DtoCreateSSHRequestFromJSON,
DtoCreateSSHRequestToJSON,
DtoSshResponseFromJSON,
DtoSshResponseToJSON,
GetSSHKey200ResponseFromJSON,
GetSSHKey200ResponseToJSON,
} from "../models/index";
export interface CreateSSHKeyRequest {
dtoCreateSSHRequest: DtoCreateSSHRequest;

View File

@@ -13,8 +13,19 @@
*/
import * as runtime from "../runtime";
import type {DtoCreateTaintRequest, DtoTaintResponse, DtoUpdateTaintRequest,} from "../models/index";
import {DtoCreateTaintRequestToJSON, DtoTaintResponseFromJSON, DtoUpdateTaintRequestToJSON,} from "../models/index";
import type {
DtoCreateTaintRequest,
DtoTaintResponse,
DtoUpdateTaintRequest,
} from "../models/index";
import {
DtoCreateTaintRequestFromJSON,
DtoCreateTaintRequestToJSON,
DtoTaintResponseFromJSON,
DtoTaintResponseToJSON,
DtoUpdateTaintRequestFromJSON,
DtoUpdateTaintRequestToJSON,
} from "../models/index";
export interface CreateTaintRequest {
dtoCreateTaintRequest: DtoCreateTaintRequest;

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,16 +12,42 @@
* Do not edit the class manually.
*/
import type {DtoDomainResponse} from "./DtoDomainResponse";
import {DtoDomainResponseFromJSON, DtoDomainResponseToJSON,} from "./DtoDomainResponse";
import type {DtoLoadBalancerResponse} from "./DtoLoadBalancerResponse";
import {DtoLoadBalancerResponseFromJSON, DtoLoadBalancerResponseToJSON,} from "./DtoLoadBalancerResponse";
import type {DtoNodePoolResponse} from "./DtoNodePoolResponse";
import {DtoNodePoolResponseFromJSON, DtoNodePoolResponseToJSON,} from "./DtoNodePoolResponse";
import type {DtoServerResponse} from "./DtoServerResponse";
import {DtoServerResponseFromJSON, DtoServerResponseToJSON,} from "./DtoServerResponse";
import type {DtoRecordSetResponse} from "./DtoRecordSetResponse";
import {DtoRecordSetResponseFromJSON, DtoRecordSetResponseToJSON,} from "./DtoRecordSetResponse";
import { mapValues } from "../runtime";
import type { DtoDomainResponse } from "./DtoDomainResponse";
import {
DtoDomainResponseFromJSON,
DtoDomainResponseFromJSONTyped,
DtoDomainResponseToJSON,
DtoDomainResponseToJSONTyped,
} from "./DtoDomainResponse";
import type { DtoLoadBalancerResponse } from "./DtoLoadBalancerResponse";
import {
DtoLoadBalancerResponseFromJSON,
DtoLoadBalancerResponseFromJSONTyped,
DtoLoadBalancerResponseToJSON,
DtoLoadBalancerResponseToJSONTyped,
} from "./DtoLoadBalancerResponse";
import type { DtoNodePoolResponse } from "./DtoNodePoolResponse";
import {
DtoNodePoolResponseFromJSON,
DtoNodePoolResponseFromJSONTyped,
DtoNodePoolResponseToJSON,
DtoNodePoolResponseToJSONTyped,
} from "./DtoNodePoolResponse";
import type { DtoServerResponse } from "./DtoServerResponse";
import {
DtoServerResponseFromJSON,
DtoServerResponseFromJSONTyped,
DtoServerResponseToJSON,
DtoServerResponseToJSONTyped,
} from "./DtoServerResponse";
import type { DtoRecordSetResponse } from "./DtoRecordSetResponse";
import {
DtoRecordSetResponseFromJSON,
DtoRecordSetResponseFromJSONTyped,
DtoRecordSetResponseToJSON,
DtoRecordSetResponseToJSONTyped,
} from "./DtoRecordSetResponse";
/**
*
@@ -77,6 +103,18 @@ export interface DtoClusterResponse {
* @memberof DtoClusterResponse
*/
created_at?: string;
/**
*
* @type {string}
* @memberof DtoClusterResponse
*/
docker_image?: string;
/**
*
* @type {string}
* @memberof DtoClusterResponse
*/
docker_tag?: string;
/**
*
* @type {DtoLoadBalancerResponse}
@@ -179,6 +217,9 @@ export function DtoClusterResponseFromJSONTyped(
? undefined
: DtoRecordSetResponseFromJSON(json["control_plane_record_set"]),
created_at: json["created_at"] == null ? undefined : json["created_at"],
docker_image:
json["docker_image"] == null ? undefined : json["docker_image"],
docker_tag: json["docker_tag"] == null ? undefined : json["docker_tag"],
glueops_load_balancer:
json["glueops_load_balancer"] == null
? undefined
@@ -223,6 +264,8 @@ export function DtoClusterResponseToJSONTyped(
value["control_plane_record_set"],
),
created_at: value["created_at"],
docker_image: value["docker_image"],
docker_tag: value["docker_tag"],
glueops_load_balancer: DtoLoadBalancerResponseToJSON(
value["glueops_load_balancer"],
),

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export
@@ -24,6 +25,18 @@ export interface DtoCreateClusterRequest {
* @memberof DtoCreateClusterRequest
*/
cluster_provider?: string;
/**
*
* @type {string}
* @memberof DtoCreateClusterRequest
*/
docker_image?: string;
/**
*
* @type {string}
* @memberof DtoCreateClusterRequest
*/
docker_tag?: string;
/**
*
* @type {string}
@@ -63,6 +76,9 @@ export function DtoCreateClusterRequestFromJSONTyped(
return {
cluster_provider:
json["cluster_provider"] == null ? undefined : json["cluster_provider"],
docker_image:
json["docker_image"] == null ? undefined : json["docker_image"],
docker_tag: json["docker_tag"] == null ? undefined : json["docker_tag"],
name: json["name"] == null ? undefined : json["name"],
region: json["region"] == null ? undefined : json["region"],
};
@@ -84,6 +100,8 @@ export function DtoCreateClusterRequestToJSONTyped(
return {
cluster_provider: value["cluster_provider"],
docker_image: value["docker_image"],
docker_tag: value["docker_tag"],
name: value["name"],
region: value["region"],
};

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,8 +12,14 @@
* Do not edit the class manually.
*/
import type {DtoJWK} from "./DtoJWK";
import {DtoJWKFromJSON, DtoJWKToJSON,} from "./DtoJWK";
import { mapValues } from "../runtime";
import type { DtoJWK } from "./DtoJWK";
import {
DtoJWKFromJSON,
DtoJWKFromJSONTyped,
DtoJWKToJSON,
DtoJWKToJSONTyped,
} from "./DtoJWK";
/**
*

View File

@@ -12,8 +12,14 @@
* Do not edit the class manually.
*/
import type {DtoJobStatus} from "./DtoJobStatus";
import {DtoJobStatusFromJSON, DtoJobStatusToJSON,} from "./DtoJobStatus";
import { mapValues } from "../runtime";
import type { DtoJobStatus } from "./DtoJobStatus";
import {
DtoJobStatusFromJSON,
DtoJobStatusFromJSONTyped,
DtoJobStatusToJSON,
DtoJobStatusToJSONTyped,
} from "./DtoJobStatus";
/**
*

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,14 +12,35 @@
* Do not edit the class manually.
*/
import type {DtoTaintResponse} from "./DtoTaintResponse";
import {DtoTaintResponseFromJSON, DtoTaintResponseToJSON,} from "./DtoTaintResponse";
import type {DtoLabelResponse} from "./DtoLabelResponse";
import {DtoLabelResponseFromJSON, DtoLabelResponseToJSON,} from "./DtoLabelResponse";
import type {DtoServerResponse} from "./DtoServerResponse";
import {DtoServerResponseFromJSON, DtoServerResponseToJSON,} from "./DtoServerResponse";
import type {DtoAnnotationResponse} from "./DtoAnnotationResponse";
import {DtoAnnotationResponseFromJSON, DtoAnnotationResponseToJSON,} from "./DtoAnnotationResponse";
import { mapValues } from "../runtime";
import type { DtoTaintResponse } from "./DtoTaintResponse";
import {
DtoTaintResponseFromJSON,
DtoTaintResponseFromJSONTyped,
DtoTaintResponseToJSON,
DtoTaintResponseToJSONTyped,
} from "./DtoTaintResponse";
import type { DtoLabelResponse } from "./DtoLabelResponse";
import {
DtoLabelResponseFromJSON,
DtoLabelResponseFromJSONTyped,
DtoLabelResponseToJSON,
DtoLabelResponseToJSONTyped,
} from "./DtoLabelResponse";
import type { DtoServerResponse } from "./DtoServerResponse";
import {
DtoServerResponseFromJSON,
DtoServerResponseFromJSONTyped,
DtoServerResponseToJSON,
DtoServerResponseToJSONTyped,
} from "./DtoServerResponse";
import type { DtoAnnotationResponse } from "./DtoAnnotationResponse";
import {
DtoAnnotationResponseFromJSON,
DtoAnnotationResponseFromJSONTyped,
DtoAnnotationResponseToJSON,
DtoAnnotationResponseToJSONTyped,
} from "./DtoAnnotationResponse";
/**
*

View File

@@ -12,8 +12,14 @@
* Do not edit the class manually.
*/
import type {DtoJob} from "./DtoJob";
import {DtoJobFromJSON, DtoJobToJSON,} from "./DtoJob";
import { mapValues } from "../runtime";
import type { DtoJob } from "./DtoJob";
import {
DtoJobFromJSON,
DtoJobFromJSONTyped,
DtoJobToJSON,
DtoJobToJSONTyped,
} from "./DtoJob";
/**
*

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export
@@ -24,6 +25,18 @@ export interface DtoUpdateClusterRequest {
* @memberof DtoUpdateClusterRequest
*/
cluster_provider?: string;
/**
*
* @type {string}
* @memberof DtoUpdateClusterRequest
*/
docker_image?: string;
/**
*
* @type {string}
* @memberof DtoUpdateClusterRequest
*/
docker_tag?: string;
/**
*
* @type {string}
@@ -63,6 +76,9 @@ export function DtoUpdateClusterRequestFromJSONTyped(
return {
cluster_provider:
json["cluster_provider"] == null ? undefined : json["cluster_provider"],
docker_image:
json["docker_image"] == null ? undefined : json["docker_image"],
docker_tag: json["docker_tag"] == null ? undefined : json["docker_tag"],
name: json["name"] == null ? undefined : json["name"],
region: json["region"] == null ? undefined : json["region"],
};
@@ -84,6 +100,8 @@ export function DtoUpdateClusterRequestToJSONTyped(
return {
cluster_provider: value["cluster_provider"],
docker_image: value["docker_image"],
docker_tag: value["docker_tag"],
name: value["name"],
region: value["region"],
};

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,13 +12,19 @@
* Do not edit the class manually.
*/
import type {DtoSshResponse} from "./DtoSshResponse";
import {DtoSshResponseFromJSONTyped, DtoSshResponseToJSON, instanceOfDtoSshResponse,} from "./DtoSshResponse";
import type {DtoSshRevealResponse} from "./DtoSshRevealResponse";
import type { DtoSshResponse } from "./DtoSshResponse";
import {
DtoSshRevealResponseFromJSONTyped,
DtoSshRevealResponseToJSON,
instanceOfDtoSshRevealResponse,
instanceOfDtoSshResponse,
DtoSshResponseFromJSON,
DtoSshResponseFromJSONTyped,
DtoSshResponseToJSON,
} from "./DtoSshResponse";
import type { DtoSshRevealResponse } from "./DtoSshRevealResponse";
import {
instanceOfDtoSshRevealResponse,
DtoSshRevealResponseFromJSON,
DtoSshRevealResponseFromJSONTyped,
DtoSshRevealResponseToJSON,
} from "./DtoSshRevealResponse";
/**

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,10 +12,21 @@
* Do not edit the class manually.
*/
import type {ModelsUserEmail} from "./ModelsUserEmail";
import {ModelsUserEmailFromJSON, ModelsUserEmailToJSON,} from "./ModelsUserEmail";
import type {ModelsOrganization} from "./ModelsOrganization";
import {ModelsOrganizationFromJSON, ModelsOrganizationToJSON,} from "./ModelsOrganization";
import { mapValues } from "../runtime";
import type { ModelsUserEmail } from "./ModelsUserEmail";
import {
ModelsUserEmailFromJSON,
ModelsUserEmailFromJSONTyped,
ModelsUserEmailToJSON,
ModelsUserEmailToJSONTyped,
} from "./ModelsUserEmail";
import type { ModelsOrganization } from "./ModelsOrganization";
import {
ModelsOrganizationFromJSON,
ModelsOrganizationFromJSONTyped,
ModelsOrganizationToJSON,
ModelsOrganizationToJSONTyped,
} from "./ModelsOrganization";
/**
*

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

View File

@@ -12,6 +12,7 @@
* Do not edit the class manually.
*/
import { mapValues } from "../runtime";
/**
*
* @export

Some files were not shown because too many files have changed in this diff Show More