Mirror of https://github.com/GlueOps/autoglue.git (synced 2026-02-13 04:40:05 +01:00)

feat: complete node pool api, sdk and ui
Signed-off-by: allanice001 <allanice001@gmail.com>
@@ -79,15 +79,17 @@ func NewRouter(db *gorm.DB, jobs *bg.Jobs) http.Handler {
 		a.Post("/logout", handlers.Logout(db))
 	})
 
-	v1.Route("/admin/archer", func(a chi.Router) {
-		a.Use(authUser)
-		a.Use(httpmiddleware.RequirePlatformAdmin())
+	v1.Route("/admin", func(admin chi.Router) {
+		admin.Route("/archer", func(archer chi.Router) {
+			archer.Use(authUser)
+			archer.Use(httpmiddleware.RequirePlatformAdmin())
 
-		a.Get("/jobs", handlers.AdminListArcherJobs(db))
-		a.Post("/jobs", handlers.AdminEnqueueArcherJob(db, jobs))
-		a.Post("/jobs/{id}/retry", handlers.AdminRetryArcherJob(db))
-		a.Post("/jobs/{id}/cancel", handlers.AdminCancelArcherJob(db))
-		a.Get("/queues", handlers.AdminListArcherQueues(db))
+			archer.Get("/jobs", handlers.AdminListArcherJobs(db))
+			archer.Post("/jobs", handlers.AdminEnqueueArcherJob(db, jobs))
+			archer.Post("/jobs/{id}/retry", handlers.AdminRetryArcherJob(db))
+			archer.Post("/jobs/{id}/cancel", handlers.AdminCancelArcherJob(db))
+			archer.Get("/queues", handlers.AdminListArcherQueues(db))
+		})
 	})
 
 	v1.Route("/me", func(me chi.Router) {
@@ -168,6 +170,35 @@ func NewRouter(db *gorm.DB, jobs *bg.Jobs) http.Handler {
 			a.Patch("/{id}", handlers.UpdateAnnotation(db))
 			a.Delete("/{id}", handlers.DeleteAnnotation(db))
 		})
+
+		v1.Route("/node-pools", func(n chi.Router) {
+			n.Use(authOrg)
+			n.Get("/", handlers.ListNodePools(db))
+			n.Post("/", handlers.CreateNodePool(db))
+			n.Get("/{id}", handlers.GetNodePool(db))
+			n.Patch("/{id}", handlers.UpdateNodePool(db))
+			n.Delete("/{id}", handlers.DeleteNodePool(db))
+
+			// Servers
+			n.Get("/{id}/servers", handlers.ListNodePoolServers(db))
+			n.Post("/{id}/servers", handlers.AttachNodePoolServers(db))
+			n.Delete("/{id}/servers/{serverId}", handlers.DetachNodePoolServer(db))
+
+			// Taints
+			n.Get("/{id}/taints", handlers.ListNodePoolTaints(db))
+			n.Post("/{id}/taints", handlers.AttachNodePoolTaints(db))
+			n.Delete("/{id}/taints/{taintId}", handlers.DetachNodePoolTaint(db))
+
+			// Labels
+			n.Get("/{id}/labels", handlers.ListNodePoolLabels(db))
+			n.Post("/{id}/labels", handlers.AttachNodePoolLabels(db))
+			n.Delete("/{id}/labels/{labelId}", handlers.DetachNodePoolLabel(db))
+
+			// Annotations
+			n.Get("/{id}/annotations", handlers.ListNodePoolAnnotations(db))
+			n.Post("/{id}/annotations", handlers.AttachNodePoolAnnotations(db))
+			n.Delete("/{id}/annotations/{annotationId}", handlers.DetachNodePoolAnnotation(db))
+		})
 	})
 	})
 	if config.IsDebug() {
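The block above gives node pools a conventional REST surface plus sub-resources for servers, taints, labels, and annotations, all behind the org-scoped authOrg middleware. A rough sketch of how the new endpoints could be called, assuming the router is mounted under /api/v1 and that auth is a bearer token (the host, header names, and IDs below are illustrative, not taken from this commit):

    # create a worker pool, then attach servers to it
    curl -X POST "https://autoglue.example.com/api/v1/node-pools" \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name":"workers-a","role":"worker"}'

    curl -X POST "https://autoglue.example.com/api/v1/node-pools/$POOL_ID/servers" \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"server_ids":["<server-uuid>"]}'

The JSON field names match the DTOs added later in this commit (CreateNodePoolRequest and AttachServersRequest).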
@@ -38,7 +38,10 @@ func NewRuntime() *Runtime {
 		&models.Taint{},
 		&models.Label{},
 		&models.Annotation{},
+		&models.NodePool{},
+		&models.Cluster{},
 	)
 
 	if err != nil {
 		log.Fatalf("Error initializing database: %v", err)
 	}
@@ -225,19 +225,242 @@ func sshInstallDockerWithOutput(ctx context.Context, host, user string, privateK
 	script := `
 set -euxo pipefail
 
-if ! command -v docker >/dev/null 2>&1; then
-  curl -fsSL https://get.docker.com | sh
+# ----------- toggles (set to 0 to skip) -----------
+: "${BASELINE_PKGS:=1}"
+: "${INSTALL_DOCKER:=1}"
+: "${SSH_HARDEN:=1}"
+: "${FIREWALL:=1}"
+: "${AUTO_UPDATES:=1}"
+: "${TIME_SYNC:=1}"
+: "${FAIL2BAN:=1}"
+: "${BANNER:=1}"
+
+# ----------- helpers -----------
+have() { command -v "$1" >/dev/null 2>&1; }
+
+pm=""
+if have apt-get; then pm="apt"
+elif have dnf; then pm="dnf"
+elif have yum; then pm="yum"
+elif have zypper; then pm="zypper"
+elif have apk; then pm="apk"
+fi
+
-# try to enable/start (handles distros with systemd)
-if command -v systemctl >/dev/null 2>&1; then
-  sudo systemctl enable --now docker || true
+pm_update_install() {
+  case "$pm" in
+    apt)
+      sudo apt-get update -y
+      sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends "$@"
+      ;;
+    dnf) sudo dnf install -y "$@" ;;
+    yum) sudo yum install -y "$@" ;;
+    zypper) sudo zypper --non-interactive install -y "$@" || true ;;
+    apk) sudo apk add --no-cache "$@" ;;
+    *)
+      echo "Unsupported distro: couldn't detect package manager" >&2
+      return 1
+      ;;
+  esac
+}
+
+systemd_enable_now() {
+  if have systemctl; then
+    sudo systemctl enable --now "$1" || true
+  fi
+}
+
+sshd_reload() {
+  if have systemctl && systemctl is-enabled ssh >/dev/null 2>&1; then
+    sudo systemctl reload ssh || true
+  elif have systemctl && systemctl is-enabled sshd >/dev/null 2>&1; then
+    sudo systemctl reload sshd || true
+  fi
+}
+
+# ----------- baseline packages -----------
+if [ "$BASELINE_PKGS" = "1" ] && [ -n "$pm" ]; then
+  pkgs_common="curl ca-certificates gnupg git jq unzip tar vim tmux htop net-tools"
+  case "$pm" in
+    apt) pkgs="$pkgs_common ufw openssh-client" ;;
+    dnf|yum) pkgs="$pkgs_common firewalld openssh-clients" ;;
+    zypper) pkgs="$pkgs_common firewalld openssh" ;;
+    apk) pkgs="$pkgs_common openssh-client" ;;
+  esac
+  pm_update_install $pkgs || true
+fi
+
-# add current ssh user to docker group if exists
-if getent group docker >/dev/null 2>&1; then
-  sudo usermod -aG docker "$(id -un)" || true
+# ----------- docker & compose v2 -----------
+if [ "$INSTALL_DOCKER" = "1" ]; then
+  if ! have docker; then
+    curl -fsSL https://get.docker.com | sh
+  fi
+
+  # try to enable/start (handles distros with systemd)
+  if have systemctl; then
+    sudo systemctl enable --now docker || true
+  fi
+
+  # add current ssh user to docker group if exists
+  if getent group docker >/dev/null 2>&1; then
+    sudo usermod -aG docker "$(id -un)" || true
+  fi
+
+  # docker compose v2 (plugin) if missing
+  if ! docker compose version >/dev/null 2>&1; then
+    # Try package first (Debian/Ubuntu name)
+    if [ "$pm" = "apt" ]; then
+      sudo apt-get update -y
+      sudo apt-get install -y docker-compose-plugin || true
+    fi
+
+    # Fallback: install static plugin binary under ~/.docker/cli-plugins
+    if ! docker compose version >/dev/null 2>&1; then
+      mkdir -p ~/.docker/cli-plugins
+      arch="$(uname -m)"
+      case "$arch" in
+        x86_64|amd64) arch="x86_64" ;;
+        aarch64|arm64) arch="aarch64" ;;
+      esac
+      curl -fsSL -o ~/.docker/cli-plugins/docker-compose \
+        "https://github.com/docker/compose/releases/download/v2.29.7/docker-compose-$(uname -s)-$arch"
+      chmod +x ~/.docker/cli-plugins/docker-compose
+    fi
+  fi
+fi
+
+# ----------- SSH hardening (non-destructive: separate conf file) -----------
+if [ "$SSH_HARDEN" = "1" ]; then
+  confd="/etc/ssh/sshd_config.d"
+  if [ -d "$confd" ] && [ -w "$confd" ]; then
+    sudo tee "$confd/10-bastion.conf" >/dev/null <<'EOF'
+# Bastion hardening
+PasswordAuthentication no
+ChallengeResponseAuthentication no
+KbdInteractiveAuthentication no
+UsePAM yes
+PermitEmptyPasswords no
+PubkeyAuthentication yes
+ClientAliveInterval 300
+ClientAliveCountMax 2
+LoginGraceTime 20
+MaxAuthTries 3
+MaxSessions 10
+AllowAgentForwarding no
+X11Forwarding no
+EOF
+    sshd_reload
+  else
+    echo "Skipping SSH hardening: $confd not present or not writable" >&2
+  fi
+
+  # lock root password (no effect if already locked)
+  if have passwd; then
+    sudo passwd -l root || true
+  fi
+fi
+
+# ----------- firewall -----------
+if [ "$FIREWALL" = "1" ]; then
+  if have ufw; then
+    # Keep it minimal: allow SSH and rate-limit
+    sudo ufw --force reset || true
+    sudo ufw default deny incoming
+    sudo ufw default allow outgoing
+    sudo ufw allow OpenSSH || sudo ufw allow 22/tcp
+    sudo ufw limit OpenSSH || true
+    sudo ufw --force enable
+  elif have firewall-cmd; then
+    systemd_enable_now firewalld
+    sudo firewall-cmd --permanent --add-service=ssh || sudo firewall-cmd --permanent --add-port=22/tcp
+    sudo firewall-cmd --reload || true
+  else
+    echo "No supported firewall tool detected; skipping." >&2
+  fi
+fi
+
+# ----------- unattended / automatic updates -----------
+if [ "$AUTO_UPDATES" = "1" ] && [ -n "$pm" ]; then
+  case "$pm" in
+    apt)
+      pm_update_install unattended-upgrades apt-listchanges || true
+      sudo dpkg-reconfigure -f noninteractive unattended-upgrades || true
+      sudo tee /etc/apt/apt.conf.d/20auto-upgrades >/dev/null <<'EOF'
+APT::Periodic::Update-Package-Lists "1";
+APT::Periodic::Unattended-Upgrade "1";
+APT::Periodic::AutocleanInterval "7";
+EOF
+      ;;
+    dnf)
+      pm_update_install dnf-automatic || true
+      sudo sed -i 's/^apply_updates = .*/apply_updates = yes/' /etc/dnf/automatic.conf || true
+      systemd_enable_now dnf-automatic.timer
+      ;;
+    yum)
+      pm_update_install yum-cron || true
+      sudo sed -i 's/apply_updates = no/apply_updates = yes/' /etc/yum/yum-cron.conf || true
+      systemd_enable_now yum-cron
+      ;;
+    zypper)
+      pm_update_install pkgconf-pkg-config || true
+      # SUSE has automatic updates via transactional-update / yast2-online-update; skipping heavy config.
+      ;;
+    apk)
+      # Alpine: no official unattended updater; consider periodic 'apk upgrade' via cron (skipped by default).
+      ;;
+  esac
+fi
+
+# ----------- time sync -----------
+if [ "$TIME_SYNC" = "1" ]; then
+  if have timedatectl; then
+    # Prefer systemd-timesyncd if available; else install/enable chrony
+    if [ -f /lib/systemd/system/systemd-timesyncd.service ] || [ -f /usr/lib/systemd/system/systemd-timesyncd.service ]; then
+      systemd_enable_now systemd-timesyncd
+    else
+      pm_update_install chrony || true
+      systemd_enable_now chronyd || systemd_enable_now chrony || true
+    fi
+    timedatectl set-ntp true || true
+  else
+    pm_update_install chrony || true
+    systemd_enable_now chronyd || systemd_enable_now chrony || true
+  fi
+fi
+
+# ----------- fail2ban (basic sshd jail) -----------
+if [ "$FAIL2BAN" = "1" ]; then
+  pm_update_install fail2ban || true
+  if [ -d /etc/fail2ban ]; then
+    sudo tee /etc/fail2ban/jail.d/sshd.local >/dev/null <<'EOF'
+[sshd]
+enabled = true
+port = ssh
+logpath = %(sshd_log)s
+maxretry = 4
+bantime = 1h
+findtime = 10m
+EOF
+    systemd_enable_now fail2ban
+  fi
+fi
+
+# ----------- SSH banner / MOTD -----------
+if [ "$BANNER" = "1" ]; then
+  if [ -w /etc/issue.net ] || sudo test -w /etc/issue.net; then
+    sudo tee /etc/issue.net >/dev/null <<'EOF'
+NOTICE: Authorized use only. Activity may be monitored and reported.
+EOF
+    # Ensure banner is enabled via our bastion conf
+    if [ -d /etc/ssh/sshd_config.d ]; then
+      if ! grep -q '^Banner ' /etc/ssh/sshd_config.d/10-bastion.conf 2>/dev/null; then
+        echo 'Banner /etc/issue.net' | sudo tee -a /etc/ssh/sshd_config.d/10-bastion.conf >/dev/null
+        sshd_reload
+      fi
+    fi
+  fi
+fi
+
+echo "Bootstrap complete. If you were added to the docker group, log out and back in to apply."
 `
 
 	// Send script via stdin to avoid quoting/escaping issues
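Every stage of the new bootstrap script sits behind an environment toggle (the : "${VAR:=1}" defaults at the top), so individual stages can be skipped by setting the variable to 0 in the environment the script runs in. A minimal sketch, assuming the script has been saved locally as bootstrap.sh; the filename and ssh invocation are illustrative, not part of this commit:

    # run locally, skipping the firewall and fail2ban stages
    FIREWALL=0 FAIL2BAN=0 bash bootstrap.sh

    # or mirror the Go code's stdin approach and set the toggles on the remote side
    ssh admin@bastion 'FIREWALL=0 FAIL2BAN=0 bash -s' < bootstrap.sh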
internal/handlers/dto/node_pools.go (new file, 46 lines)
@@ -0,0 +1,46 @@
+package dto
+
+import "github.com/glueops/autoglue/internal/common"
+
+type NodeRole string
+
+const (
+	NodeRoleMaster NodeRole = "master"
+	NodeRoleWorker NodeRole = "worker"
+)
+
+type CreateNodePoolRequest struct {
+	Name string   `json:"name"`
+	Role NodeRole `json:"role" enums:"master,worker" swaggertype:"string"`
+}
+
+type UpdateNodePoolRequest struct {
+	Name *string   `json:"name"`
+	Role *NodeRole `json:"role" enums:"master,worker" swaggertype:"string"`
+}
+
+type NodePoolResponse struct {
+	common.AuditFields
+	Name        string               `json:"name"`
+	Role        NodeRole             `json:"role" enums:"master,worker" swaggertype:"string"`
+	Servers     []ServerResponse     `json:"servers"`
+	Annotations []AnnotationResponse `json:"annotations"`
+	Labels      []LabelResponse      `json:"labels"`
+	Taints      []TaintResponse      `json:"taints"`
+}
+
+type AttachServersRequest struct {
+	ServerIDs []string `json:"server_ids"`
+}
+
+type AttachTaintsRequest struct {
+	TaintIDs []string `json:"taint_ids"`
+}
+
+type AttachLabelsRequest struct {
+	LabelIDs []string `json:"label_ids"`
+}
+
+type AttachAnnotationsRequest struct {
+	AnnotationIDs []string `json:"annotation_ids"`
+}
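These DTOs pin down the payloads for the handlers wired up above: create/update carry a name and a master/worker role, the attach requests carry plain ID lists, and NodePoolResponse embeds the attached servers, annotations, labels, and taints alongside common.AuditFields. A hedged example of the update and attach payloads, using the same illustrative host and auth header as the earlier sketch:

    # rename a pool and switch its role to master (UpdateNodePoolRequest)
    curl -X PATCH "https://autoglue.example.com/api/v1/node-pools/$POOL_ID" \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"name":"control-plane","role":"master"}'

    # attach taints by ID (AttachTaintsRequest); labels and annotations use label_ids / annotation_ids
    curl -X POST "https://autoglue.example.com/api/v1/node-pools/$POOL_ID/taints" \
      -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
      -d '{"taint_ids":["<taint-uuid>"]}'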
@@ -8,8 +8,8 @@ type CreateServerRequest struct {
 	PrivateIPAddress string `json:"private_ip_address"`
 	SSHUser          string `json:"ssh_user"`
 	SshKeyID         string `json:"ssh_key_id"`
-	Role             string `json:"role" example:"master|worker|bastion"`
-	Status           string `json:"status,omitempty" example:"pending|provisioning|ready|failed"`
+	Role             string `json:"role" example:"master|worker|bastion" enums:"master,worker,bastion"`
+	Status           string `json:"status,omitempty" example:"pending|provisioning|ready|failed" enums:"pending,provisioning,ready,failed"`
 }
 
 type UpdateServerRequest struct {
@@ -18,8 +18,8 @@ type UpdateServerRequest struct {
 	PrivateIPAddress *string `json:"private_ip_address,omitempty"`
 	SSHUser          *string `json:"ssh_user,omitempty"`
 	SshKeyID         *string `json:"ssh_key_id,omitempty"`
-	Role             *string `json:"role,omitempty" example:"master|worker|bastion"`
-	Status           *string `json:"status,omitempty" example:"pending|provisioning|ready|failed"`
+	Role             *string `json:"role" example:"master|worker|bastion" enums:"master,worker,bastion"`
+	Status           *string `json:"status,omitempty" example:"pending|provisioning|ready|failed" enums:"pending,provisioning,ready,failed"`
 }
 
 type ServerResponse struct {
@@ -30,8 +30,8 @@ type ServerResponse struct {
 	PrivateIPAddress string    `json:"private_ip_address"`
 	SSHUser          string    `json:"ssh_user"`
 	SshKeyID         uuid.UUID `json:"ssh_key_id"`
-	Role             string    `json:"role"`
-	Status           string    `json:"status"`
+	Role             string    `json:"role" example:"master|worker|bastion" enums:"master,worker,bastion"`
+	Status           string    `json:"status,omitempty" example:"pending|provisioning|ready|failed" enums:"pending,provisioning,ready,failed"`
 	CreatedAt        string    `json:"created_at,omitempty"`
 	UpdatedAt        string    `json:"updated_at,omitempty"`
 }
internal/handlers/node_pools.go (new file, 1270 lines)
File diff suppressed because it is too large.
@@ -382,7 +382,6 @@ func DownloadSSHKey(db *gorm.DB) http.HandlerFunc {
 	}
 
 	if mode == "json" {
-		prefix := keyFilenamePrefix(key.PublicKey)
 		resp := dto.SshMaterialJSON{
 			ID:   key.ID.String(),
 			Name: key.Name,
@@ -392,7 +391,7 @@ func DownloadSSHKey(db *gorm.DB) http.HandlerFunc {
 		case "public":
 			pub := key.PublicKey
 			resp.PublicKey = &pub
-			resp.Filenames = []string{fmt.Sprintf("%s_%s.pub", prefix, key.ID.String())}
+			resp.Filenames = []string{fmt.Sprintf("%s.pub", key.ID.String())}
 			utils.WriteJSON(w, http.StatusOK, resp)
 			return
@@ -403,7 +402,7 @@ func DownloadSSHKey(db *gorm.DB) http.HandlerFunc {
 				return
 			}
 			resp.PrivatePEM = &plain
-			resp.Filenames = []string{fmt.Sprintf("%s_%s.pem", prefix, key.ID.String())}
+			resp.Filenames = []string{fmt.Sprintf("%s.pem", key.ID.String())}
 			utils.WriteJSON(w, http.StatusOK, resp)
 			return
@@ -416,16 +415,16 @@ func DownloadSSHKey(db *gorm.DB) http.HandlerFunc {
 
 			var buf bytes.Buffer
 			zw := zip.NewWriter(&buf)
-			_ = toZipFile(fmt.Sprintf("%s_%s.pem", prefix, key.ID.String()), []byte(plain), zw)
-			_ = toZipFile(fmt.Sprintf("%s_%s.pub", prefix, key.ID.String()), []byte(key.PublicKey), zw)
+			_ = toZipFile(fmt.Sprintf("%s.pem", key.ID.String()), []byte(plain), zw)
+			_ = toZipFile(fmt.Sprintf("%s.pub", key.ID.String()), []byte(key.PublicKey), zw)
 			_ = zw.Close()
 
 			b64 := utils.EncodeB64(buf.Bytes())
 			resp.ZipBase64 = &b64
 			resp.Filenames = []string{
-				fmt.Sprintf("%s_%s.zip", prefix, key.ID.String()),
-				fmt.Sprintf("%s_%s.pem", prefix, key.ID.String()),
-				fmt.Sprintf("%s_%s.pub", prefix, key.ID.String()),
+				fmt.Sprintf("%s.zip", key.ID.String()),
+				fmt.Sprintf("%s.pem", key.ID.String()),
+				fmt.Sprintf("%s.pub", key.ID.String()),
 			}
 			utils.WriteJSON(w, http.StatusOK, resp)
 			return
@@ -436,11 +435,9 @@ func DownloadSSHKey(db *gorm.DB) http.HandlerFunc {
 		}
 	}
 
-	prefix := keyFilenamePrefix(key.PublicKey)
-
 	switch part {
 	case "public":
-		filename := fmt.Sprintf("%s_%s.pub", prefix, key.ID.String())
+		filename := fmt.Sprintf("%s.pub", key.ID.String())
 		w.Header().Set("Content-Type", "text/plain")
 		w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
 		_, _ = w.Write([]byte(key.PublicKey))
@@ -452,7 +449,7 @@ func DownloadSSHKey(db *gorm.DB) http.HandlerFunc {
 			utils.WriteError(w, http.StatusInternalServerError, "db_error", "failed to decrypt ssh key")
 			return
 		}
-		filename := fmt.Sprintf("%s_%s.pem", prefix, key.ID.String())
+		filename := fmt.Sprintf("%s.pem", key.ID.String())
 		w.Header().Set("Content-Type", "application/x-pem-file")
 		w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
 		_, _ = w.Write([]byte(plain))
@@ -467,8 +464,8 @@ func DownloadSSHKey(db *gorm.DB) http.HandlerFunc {
 
 		var buf bytes.Buffer
 		zw := zip.NewWriter(&buf)
-		_ = toZipFile(fmt.Sprintf("%s_%s.pem", prefix, key.ID.String()), []byte(plain), zw)
-		_ = toZipFile(fmt.Sprintf("%s_%s.pub", prefix, key.ID.String()), []byte(key.PublicKey), zw)
+		_ = toZipFile(fmt.Sprintf("%s.pem", key.ID.String()), []byte(plain), zw)
+		_ = toZipFile(fmt.Sprintf("%s.pub", key.ID.String()), []byte(key.PublicKey), zw)
 		_ = zw.Close()
 
 		filename := fmt.Sprintf("ssh_key_%s.zip", key.ID.String())
@@ -9,4 +9,5 @@ type Annotation struct {
 	Organization Organization `gorm:"foreignKey:OrganizationID;constraint:OnDelete:CASCADE" json:"organization"`
 	Key          string       `gorm:"not null" json:"key"`
 	Value        string       `gorm:"not null" json:"value"`
+	NodePools    []NodePool   `gorm:"many2many:node_annotations;constraint:OnDelete:CASCADE" json:"node_pools,omitempty"`
 }
internal/models/cluster.go (new file, 29 lines)
@@ -0,0 +1,29 @@
+package models
+
+import (
+	"time"
+
+	"github.com/google/uuid"
+)
+
+type Cluster struct {
+	ID                  uuid.UUID    `gorm:"type:uuid;default:gen_random_uuid();primaryKey" json:"id"`
+	OrganizationID      uuid.UUID    `gorm:"type:uuid;not null" json:"organization_id"`
+	Organization        Organization `gorm:"foreignKey:OrganizationID;constraint:OnDelete:CASCADE" json:"organization"`
+	Name                string       `gorm:"not null" json:"name"`
+	Provider            string       `json:"provider"`
+	Region              string       `json:"region"`
+	Status              string       `json:"status"`
+	CaptainDomain       string       `gorm:"not null" json:"captain_domain"`
+	ClusterLoadBalancer string       `json:"cluster_load_balancer"`
+	RandomToken         string       `json:"random_token"`
+	CertificateKey      string       `json:"certificate_key"`
+	EncryptedKubeconfig string       `gorm:"type:text" json:"-"`
+	KubeIV              string       `json:"-"`
+	KubeTag             string       `json:"-"`
+	NodePools           []NodePool   `gorm:"many2many:cluster_node_pools;constraint:OnDelete:CASCADE" json:"node_pools,omitempty"`
+	BastionServerID     *uuid.UUID   `gorm:"type:uuid" json:"bastion_server_id,omitempty"`
+	BastionServer       *Server      `gorm:"foreignKey:BastionServerID" json:"bastion_server,omitempty"`
+	CreatedAt           time.Time    `json:"created_at,omitempty" gorm:"type:timestamptz;column:created_at;not null;default:now()"`
+	UpdatedAt           time.Time    `json:"updated_at,omitempty" gorm:"type:timestamptz;autoUpdateTime;column:updated_at;not null;default:now()"`
+}
@@ -9,5 +9,5 @@ type Label struct {
 	Organization Organization `gorm:"foreignKey:OrganizationID;constraint:OnDelete:CASCADE" json:"organization"`
 	Key          string       `gorm:"not null" json:"key"`
 	Value        string       `gorm:"not null" json:"value"`
-	NodePools    []NodePool   `gorm:"many2many:node_labels;constraint:OnDelete:CASCADE" json:"servers,omitempty"`
+	NodePools    []NodePool   `gorm:"many2many:node_labels;constraint:OnDelete:CASCADE" json:"node_pools,omitempty"`
 }
@@ -1,23 +1,18 @@
 package models
 
 import (
-	"time"
-
-	"github.com/google/uuid"
+	"github.com/glueops/autoglue/internal/common"
 )
 
 type NodePool struct {
-	ID             uuid.UUID    `gorm:"type:uuid;primaryKey;default:gen_random_uuid()" json:"id"`
-	OrganizationID uuid.UUID    `gorm:"type:uuid;not null" json:"organization_id"`
-	Organization   Organization `gorm:"foreignKey:OrganizationID;constraint:OnDelete:CASCADE" json:"organization"`
-	Name           string       `gorm:"not null" json:"name"`
-	Servers        []Server     `gorm:"many2many:node_servers;constraint:OnDelete:CASCADE" json:"servers,omitempty"`
-	Annotations    []Annotation `gorm:"many2many:node_annotations;constraint:OnDelete:CASCADE" json:"annotations,omitempty"`
-	Labels         []Label      `gorm:"many2many:node_labels;constraint:OnDelete:CASCADE" json:"labels,omitempty"`
-	Taints         []Taint      `gorm:"many2many:node_taints;constraint:OnDelete:CASCADE" json:"taints,omitempty"`
+	common.AuditFields
+	Organization Organization `gorm:"foreignKey:OrganizationID;constraint:OnDelete:CASCADE" json:"organization"`
+	Name         string       `gorm:"not null" json:"name"`
+	Servers      []Server     `gorm:"many2many:node_servers;constraint:OnDelete:CASCADE" json:"servers,omitempty"`
+	Annotations  []Annotation `gorm:"many2many:node_annotations;constraint:OnDelete:CASCADE" json:"annotations,omitempty"`
+	Labels       []Label      `gorm:"many2many:node_labels;constraint:OnDelete:CASCADE" json:"labels,omitempty"`
+	Taints       []Taint      `gorm:"many2many:node_taints;constraint:OnDelete:CASCADE" json:"taints,omitempty"`
+	//Clusters []Cluster `gorm:"many2many:cluster_node_pools;constraint:OnDelete:CASCADE" json:"clusters,omitempty"`
+	//Topology string `gorm:"not null,default:'stacked'" json:"topology,omitempty"` // stacked or external
-	Role      string    `gorm:"not null,default:'worker'" json:"role,omitempty"` // master, worker, or etcd (etcd only if topology = external
-	CreatedAt time.Time `gorm:"not null;default:now()" json:"created_at" format:"date-time"`
-	UpdatedAt time.Time `gorm:"not null;default:now()" json:"updated_at" format:"date-time"`
+	Role string `gorm:"not null,default:'worker'" json:"role,omitempty"` // master, worker, or etcd (etcd only if topology = external
 }
@@ -19,8 +19,9 @@ type Server struct {
 	SSHUser   string    `gorm:"not null" json:"ssh_user"`
 	SshKeyID  uuid.UUID `gorm:"type:uuid;not null" json:"ssh_key_id"`
 	SshKey    SshKey    `gorm:"foreignKey:SshKeyID" json:"ssh_key"`
-	Role      string    `gorm:"not null" json:"role"`            // e.g., "master", "worker", "bastion"
-	Status    string    `gorm:"default:'pending'" json:"status"` // pending, provisioning, ready, failed
+	Role      string     `gorm:"not null" json:"role" enums:"master,worker,bastion"`                          // e.g., "master", "worker", "bastion"
+	Status    string     `gorm:"default:'pending'" json:"status" enums:"pending, provisioning, ready, failed"` // pending, provisioning, ready, failed
+	NodePools []NodePool `gorm:"many2many:node_servers;constraint:OnDelete:CASCADE" json:"node_pools,omitempty"`
 	CreatedAt time.Time `gorm:"not null;default:now()" json:"created_at" format:"date-time"`
 	UpdatedAt time.Time `gorm:"not null;default:now()" json:"updated_at" format:"date-time"`
 }