From aee9f9995ca54e81218656bb72f7beabc529532f Mon Sep 17 00:00:00 2001
From: kaiyou <dev@kaiyou.fr>
Date: Thu, 18 May 2023 12:46:14 +0200
Subject: [PATCH] Simplify the authentication mechanisms

Master services (the scheduler and the controller manager) now all
authenticate using the master bearer token. This goes against the
least-privilege principle, but they run in the same process as the
master itself, so very little harm is expected from this simplification.
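
For illustration, a minimal sketch of the pattern this patch introduces
(the real helpers are newKC and newLoopbackClients in services/k8s.go;
the package placement and names below are otherwise made up): every
in-process component reuses one bearer token instead of its own API
client certificate.

    package services // illustrative placement, mirroring services/k8s.go

    import (
        "fmt"

        "k8s.io/client-go/rest"
    )

    // loopbackSketch is a hypothetical helper: the scheduler and the
    // controller manager all build their client config from the same
    // CA file and shared bearer token generated at startup.
    func loopbackSketch(masterIP string, port int, caPath, token string) *rest.Config {
        return &rest.Config{
            Host:            fmt.Sprintf("https://[%s]:%d", masterIP, port),
            TLSClientConfig: rest.TLSClientConfig{CAFile: caPath},
            BearerToken:     token,
        }
    }

Kubelets keep authenticating with their node certificate, which is what
newCertClients in this patch covers.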
---
 services/apiserver.go | 75 +++++++++++++++++++------------------------
 services/certs.go     | 30 -----------------
 services/cm.go        |  2 +-
 services/k8s.go       | 26 +++++++++++----
 services/kubelet.go   |  2 +-
 services/manager.go   | 23 +++++++------
 services/scheduler.go |  2 +-
 7 files changed, 68 insertions(+), 92 deletions(-)

diff --git a/services/apiserver.go b/services/apiserver.go
index 8dbaf50..98a17a0 100644
--- a/services/apiserver.go
+++ b/services/apiserver.go
@@ -8,7 +8,6 @@ import (
 	"os"
 	"time"
 
-	"github.com/google/uuid"
 	extensions "k8s.io/apiextensions-apiserver/pkg/apiserver"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -32,8 +31,6 @@ import (
 	"k8s.io/apiserver/pkg/util/flowcontrol/request"
 	"k8s.io/apiserver/pkg/util/notfoundhandler"
 	"k8s.io/apiserver/pkg/util/openapi"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/component-base/version"
@@ -128,18 +125,12 @@ func buildConfig(c *Cluster) (config *server.Config, clients *Clients, err error
 	}
 
 	// Setup loopback clients (no authorization at this point, handled later)
-	loopback, err := config.SecureServing.NewLoopbackClientConfig(uuid.NewString(), nil)
-	loopback.TLSClientConfig.CAFile = c.pki.TLS.CertPath()
-	if err != nil {
-		err = fmt.Errorf("could not setup loopback config: %w", err)
-		return
-	}
-	config.LoopbackClientConfig = loopback
-	clients, err = newClientsForKC(config.LoopbackClientConfig)
+	clients, err = newLoopbackClients(c)
 	if err != nil {
 		err = fmt.Errorf("could not setup loopback clients: %w", err)
 		return
 	}
+	config.LoopbackClientConfig = clients.KubeConfig
 
 	// Setup authentication
 	authConfig := authenticator.Config{
@@ -181,7 +172,7 @@ func buildConfig(c *Cluster) (config *server.Config, clients *Clients, err error
 	}
 
 	// Finally authorize loopback clients
-	server.AuthorizeClientBearerToken(loopback, &config.Authentication, &config.Authorization)
+	server.AuthorizeClientBearerToken(clients.KubeConfig, &config.Authentication, &config.Authorization)
 
 	// Setup service resolver
 	localHost, _ := url.Parse(config.LoopbackClientConfig.Host)
@@ -310,6 +301,32 @@ var kubeApiserver = &Unit{
 			return fmt.Errorf("could not initialize generic apiserver: %w", err)
 		}
 
+		// Finally start the apiserver
+		server := apiServer.GenericAPIServer.PrepareRun()
+		go clients.Start(ctx)
+		return server.Run(ctx.Done())
+	},
+	Ready: func(u *Unit, c *Cluster) bool {
+		u.Logger.Info("checking if apiserver is ready")
+		clients, err := newLoopbackClients(c)
+		if err != nil {
+			return false
+		}
+		_, err = clients.Client.CoreV1().Nodes().List(context.Background(), meta.ListOptions{})
+		if err != nil {
+			return false
+		}
+		return true
+	},
+}
+
+var loopback = &Unit{
+	Dependencies: []*Unit{kubeApiserver},
+	Start: func(u *Unit, c *Cluster, ctx context.Context) error {
+		clients, err := newLoopbackClients(c)
+		if err != nil {
+			return fmt.Errorf("could not get loopback config: %w", err)
+		}
 		// Write a loopback config (different for every start)
 		name := "loopback"
 		clientConfig := api.Config{
@@ -321,11 +338,11 @@ var kubeApiserver = &Unit{
 				AuthInfo: name,
 			}},
 			Clusters: map[string]*api.Cluster{name: {
-				Server:               config.LoopbackClientConfig.Host,
-				CertificateAuthority: config.LoopbackClientConfig.TLSClientConfig.CAFile,
+				Server:               clients.KubeConfig.Host,
+				CertificateAuthority: clients.KubeConfig.TLSClientConfig.CAFile,
 			}},
 			AuthInfos: map[string]*api.AuthInfo{name: {
-				Token: config.LoopbackClientConfig.BearerToken,
+				Token: clients.KubeConfig.BearerToken,
 			}},
 		}
 		err = os.MkdirAll("/root/.kube", 0755)
@@ -336,32 +353,6 @@ var kubeApiserver = &Unit{
 		if err != nil {
 			return fmt.Errorf("could not write privileged kubeconfig: %w", err)
 		}
-
-		// Finally start the apiserver
-		server := apiServer.GenericAPIServer.PrepareRun()
-		go clients.Start(ctx)
-		return server.Run(ctx.Done())
-	},
-	Ready: func(u *Unit, c *Cluster) bool {
-		u.Logger.Info("checking if apiserver is ready")
-		// Use the scheduler certificate for readiness test, which is more relevant than
-		// using the internal privileged API token
-		kc := &rest.Config{
-			Host: fmt.Sprintf("https://[%s]:%d", c.networking.NodeAddress.IP.String(), apiserverPort),
-			TLSClientConfig: rest.TLSClientConfig{
-				CAFile:   c.pki.TLS.CertPath(),
-				CertFile: c.masterCerts.SchedulerAPI.CertPath(),
-				KeyFile:  c.masterCerts.SchedulerAPI.KeyPath(),
-			},
-		}
-		client, err := kubernetes.NewForConfig(rest.AddUserAgent(kc, "scheduler"))
-		if err != nil {
-			return false
-		}
-		_, err = client.CoreV1().Nodes().List(context.Background(), meta.ListOptions{})
-		if err != nil {
-			return false
-		}
-		return true
+		return nil
 	},
 }
diff --git a/services/certs.go b/services/certs.go
index e094dff..f340b40 100644
--- a/services/certs.go
+++ b/services/certs.go
@@ -30,12 +30,6 @@ type MasterCerts struct {
 	Kubelet *pekahi.Certificate
 	// Service certificate for the controller manager
 	ControllersTLS *pekahi.Certificate
-	// API client certificate for the controller manager
-	ControllersAPI *pekahi.Certificate
-	// API client certificate for the scheduler
-	SchedulerAPI *pekahi.Certificate
-	// Root access to the API server
-	RootClient *pekahi.Certificate
 }
 
 // Node certs
@@ -139,24 +133,6 @@ var pkiMaster = &Unit{
 		if err != nil {
 			return err
 		}
-		// Controller manager API client certificate
-		controllersAPICert, err := bundle.GetCertOrCSR("controllers-api",
-			pekahi.NewClientTemplate("system:kube-controller-manager", ""),
-		)
-		if err != nil {
-			return err
-		}
-		// Scheduler API client certificate
-		schedulerAPICert, err := bundle.GetCertOrCSR("scheduler-api",
-			pekahi.NewClientTemplate("system:kube-scheduler", ""),
-		)
-		if err != nil {
-			return err
-		}
-		// Root client certificate
-		rootClientCert, err := bundle.GetCertOrCSR("root",
-			pekahi.NewClientTemplate("root", "system:masters"),
-		)
 
 		m := &MasterCerts{
 			TLS:            tlsCert,
@@ -164,16 +140,10 @@ var pkiMaster = &Unit{
 			EtcdTokens:     etcdTokenKey,
 			Kubelet:        kubeletCert,
 			ControllersTLS: controllersTLSCert,
-			ControllersAPI: controllersAPICert,
-			SchedulerAPI:   schedulerAPICert,
-			RootClient:     rootClientCert,
 		}
 		c.pki.TLS.Sign(m.TLS, pekahi.NewServerTemplate(m.TLS.CSR.DNSNames, m.TLS.CSR.IPAddresses))
 		c.pki.Kubelet.Sign(m.Kubelet, pekahi.NewClientTemplate(m.Kubelet.CSR.Subject.CommonName, "system:masters"))
 		c.pki.TLS.Sign(m.ControllersTLS, pekahi.NewServerTemplate(m.ControllersTLS.CSR.DNSNames, m.ControllersTLS.CSR.IPAddresses))
-		c.pki.API.Sign(m.ControllersAPI, pekahi.NewClientTemplate(m.ControllersAPI.CSR.Subject.CommonName, ""))
-		c.pki.API.Sign(m.SchedulerAPI, pekahi.NewClientTemplate(m.SchedulerAPI.CSR.Subject.CommonName, ""))
-		c.pki.API.Sign(m.RootClient, pekahi.NewClientTemplate(m.RootClient.CSR.Subject.CommonName, "system:masters"))
 		c.masterCerts = m
 		u.Manager.Logger.Info("master pki initialized")
 		return nil
diff --git a/services/cm.go b/services/cm.go
index 5809458..326c422 100644
--- a/services/cm.go
+++ b/services/cm.go
@@ -41,7 +41,7 @@ var kubeControllerManager = &Unit{
 		// Used as a replacement for InformersStarted in vanilla code
 		allReady := make(chan struct{})
 
-		clients, err := newClients(c, c.networking.NodeAddress.IP, c.masterCerts.ControllersAPI)
+		clients, err := newLoopbackClients(c)
 		if err != nil {
 			return err
 		}
diff --git a/services/k8s.go b/services/k8s.go
index 09455d5..06158fc 100644
--- a/services/k8s.go
+++ b/services/k8s.go
@@ -3,7 +3,6 @@ package services
 import (
 	"context"
 	"fmt"
-	"net"
 
 	"github.com/spf13/pflag"
 	"go.acides.org/pekahi"
@@ -44,15 +43,28 @@ type Clients struct {
 	DynInformer    dynamicinformer.DynamicSharedInformerFactory
 }
 
-func newClients(c *Cluster, masterIP net.IP, cert *pekahi.Certificate) (*Clients, error) {
-	kc := &rest.Config{
-		Host: fmt.Sprintf("https://[%s]:%d", masterIP.String(), apiserverPort),
+// Make a k8s client config for connecting to the master
+func newKC(c *Cluster) *rest.Config {
+	return &rest.Config{
+		Host: fmt.Sprintf("https://[%s]:%d", c.masterNode.VpnIP.String(), apiserverPort),
 		TLSClientConfig: rest.TLSClientConfig{
-			CAFile:   c.pki.TLS.CertPath(),
-			CertFile: cert.CertPath(),
-			KeyFile:  cert.KeyPath(),
+			CAFile: c.pki.TLS.CertPath(),
 		},
 	}
+}
+
+// Make clients authenticating with a certificate (typically for kubelets)
+func newCertClients(c *Cluster, cert *pekahi.Certificate) (*Clients, error) {
+	kc := newKC(c)
+	kc.TLSClientConfig.CertFile = cert.CertPath()
+	kc.TLSClientConfig.KeyFile = cert.KeyPath()
+	return newClientsForKC(kc)
+}
+
+// Make clients that authenticate as the master itself, using the loopback token
+func newLoopbackClients(c *Cluster) (*Clients, error) {
+	kc := newKC(c)
+	kc.BearerToken = c.loopbackToken
 	return newClientsForKC(kc)
 }
 
diff --git a/services/kubelet.go b/services/kubelet.go
index a1b43b0..a323ca7 100644
--- a/services/kubelet.go
+++ b/services/kubelet.go
@@ -44,7 +44,7 @@ var kubeKubelet = &Unit{
 		// (very difficult to check otherwise)
 		time.Sleep(10 * time.Second)
 		kubeletRoot := path.Join(c.settings.DataDir, "kubelet")
-		clients, err := newClients(c, c.masterNode.VpnIP, c.certs.API)
+		clients, err := newCertClients(c, c.certs.API)
 		if err != nil {
 			return fmt.Errorf("could not create clients: %w", err)
 		}
diff --git a/services/manager.go b/services/manager.go
index 6954a5e..e3fc0c5 100644
--- a/services/manager.go
+++ b/services/manager.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/containerd/containerd/services/server"
 	"github.com/go-logr/logr"
+	"github.com/google/uuid"
 	"github.com/sirupsen/logrus"
 	"go.acides.org/daeman"
 	"go.acides.org/hepto/utils"
@@ -63,10 +64,11 @@ type Cluster struct {
 	state      *HeptoState
 	masterNode *HeptoMeta
 
-	certs       *NodeCerts
-	masterCerts *MasterCerts
-	pki         *ClusterCA
-	containerd  *server.Server
+	certs         *NodeCerts
+	masterCerts   *MasterCerts
+	pki           *ClusterCA
+	containerd    *server.Server
+	loopbackToken string
 
 	vpn *wg.Wireguard
 }
@@ -86,10 +88,11 @@ func NewManager(settings *ClusterSettings, node *NodeSettings, logger logr.Logge
 		API:     bundle.GetCertificate("api"),
 	}
 	cluster := &Cluster{
-		settings:   settings,
-		node:       node,
-		networking: networking,
-		pki:        pki,
+		settings:      settings,
+		node:          node,
+		networking:    networking,
+		pki:           pki,
+		loopbackToken: uuid.NewString(),
 		localNode: &HeptoMeta{
 			Name:     node.Name,
 			PublicIP: node.IP,
@@ -105,11 +108,11 @@ func NewManager(settings *ClusterSettings, node *NodeSettings, logger logr.Logge
 	units := []*Unit{}
 	switch node.Role {
 	case "master":
-		units = append(units, memberlist, vpn, kubeApiserver, kubeControllerManager, kubeScheduler)
+		units = append(units, memberlist, vpn, kubeApiserver, loopback, kubeControllerManager, kubeScheduler)
 	case "node":
 		units = append(units, memberlist, vpn, kubeKubelet)
 	case "full":
-		units = append(units, memberlist, vpn, kubeApiserver, kubeControllerManager, kubeScheduler, kubeKubelet)
+		units = append(units, memberlist, vpn, kubeApiserver, loopback, kubeControllerManager, kubeScheduler, kubeKubelet)
 	}
 	return daeman.NewManager(cluster, units, logger), nil
 }
diff --git a/services/scheduler.go b/services/scheduler.go
index b5b34a3..3f4aeb3 100644
--- a/services/scheduler.go
+++ b/services/scheduler.go
@@ -10,7 +10,7 @@ var kubeScheduler = &Unit{
 	Name:         "kube-scheduler",
 	Dependencies: []*Unit{kubeApiserver, pkiCA, pkiMaster, kubeLogger},
 	Run: func(u *Unit, c *Cluster, ctx context.Context) error {
-		clients, err := newClients(c, c.networking.NodeAddress.IP, c.masterCerts.SchedulerAPI)
+		clients, err := newLoopbackClients(c)
 		if err != nil {
 			return err
 		}
-- 
GitLab