package discovery

import (
	"encoding/json"
	"sync"
	"time"

	"github.com/NinesStack/sidecar/service"
	"github.com/relistan/go-director"
	log "github.com/sirupsen/logrus"
)

// A K8sAPIDiscoverer is a discovery mechanism that assumes that a K8s cluster
// will be fronted by a load balancer and that all the ports exposed will match
// up on both the load balancer and the backing pods. It relies on an underlying
// command to run the discovery. This is normally `kubectl`.
type K8sAPIDiscoverer struct {
	Namespace string

	Command K8sDiscoveryAdapter

	discoveredSvcs  *K8sServices
	discoveredNodes *K8sNodes
	lock            sync.RWMutex
}

// NewK8sAPIDiscoverer returns a properly configured K8sAPIDiscoverer
func NewK8sAPIDiscoverer(kubeHost string, kubePort int, namespace string, timeout time.Duration, credsPath string) *K8sAPIDiscoverer {
	cmd := NewKubeAPIDiscoveryCommand(kubeHost, kubePort, namespace, timeout, credsPath)

	return &K8sAPIDiscoverer{
		discoveredSvcs:  &K8sServices{},
		discoveredNodes: &K8sNodes{},
		Namespace:       namespace,
		Command:         cmd,
	}
}
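
// A minimal usage sketch (illustrative only; the host, port, namespace,
// timeout, creds path, and poll interval here are assumptions, and the
// looper comes from the injected go-director package):
//
//	disco := NewK8sAPIDiscoverer("127.0.0.1", 8080, "default", 3*time.Second, "/path/to/creds")
//	go disco.Run(director.NewImmediateTimedLooper(director.FOREVER, 5*time.Second, nil))
//	svcs := disco.Services() // safe to call concurrently; guarded by the RWMutex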

// Services implements part of the Discoverer interface and looks at the last
// cached data from the Command (`kubectl`) and returns services in a format
// that Sidecar can manage.
func (k *K8sAPIDiscoverer) Services() []service.Service {
	k.lock.RLock()
	defer k.lock.RUnlock()

	// Enumerate all the K8s nodes we discovered, and for each one, emit all the
	// services that we separately discovered. This means we will attempt to hit
	// the NodePort for each of the nodes when looking for this service.
	var services []service.Service
	for _, node := range k.discoveredNodes.Items {
		hostname, ip := getIPHostForNode(&node)

		for _, item := range k.discoveredSvcs.Items {
			svc := service.Service{
				ID:        item.Metadata.UID,
				Name:      item.Metadata.Labels.ServiceName,
				Image:     item.Metadata.Labels.ServiceName + ":kubernetes-hosted",
				Created:   item.Metadata.CreationTimestamp,
				Hostname:  hostname,
				ProxyMode: "http",
				Status:    service.ALIVE,
				Updated:   time.Now().UTC(),
			}
			for _, port := range item.Spec.Ports {
				svc.Ports = append(svc.Ports, service.Port{
					Type:        "tcp",
					Port:        int64(port.NodePort),
					ServicePort: int64(port.Port),
					IP:          ip,
				})
			}
			services = append(services, svc)
		}
	}

	return services
}
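
// For reference, the cached data consumed above is expected to be shaped
// like `kubectl get services -o json` output. An illustrative sketch (the
// exact field mapping depends on the K8sServices struct tags, and the
// ServiceName label key is an assumption):
//
//	{"items": [{
//	    "metadata": {
//	        "uid": "1234-abcd",
//	        "creationTimestamp": "2021-01-01T00:00:00Z",
//	        "labels": {"ServiceName": "nginx"}
//	    },
//	    "spec": {"ports": [{"port": 80, "nodePort": 30080}]}
//	}]}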

func getIPHostForNode(node *K8sNode) (hostname string, ip string) {
	for _, address := range node.Status.Addresses {
		if address.Type == "InternalIP" {
			ip = address.Address
		}

		if address.Type == "Hostname" {
			hostname = address.Address
		}
	}

	return hostname, ip
}
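
// The address types handled above correspond to the standard entries in
// `kubectl get nodes -o json` output, e.g. (illustrative sketch):
//
//	"status": {
//	    "addresses": [
//	        {"type": "InternalIP", "address": "10.0.0.12"},
//	        {"type": "Hostname", "address": "ip-10-0-0-12"}
//	    ]
//	}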

// HealthCheck implements part of the Discoverer interface and returns the
// built-in AlwaysSuccessful check, on the assumption that the underlying load
// balancer we are pointing to will have already health checked the service.
func (k *K8sAPIDiscoverer) HealthCheck(svc *service.Service) (string, string) {
	return "AlwaysSuccessful", ""
}

// Listeners implements part of the Discoverer interface and always returns
// an empty list because it doesn't make sense in this context.
func (k *K8sAPIDiscoverer) Listeners() []ChangeListener {
	return []ChangeListener{}
}

// Run is part of the Discoverer interface and calls the Command in a loop,
// which is injected as a Looper.
func (k *K8sAPIDiscoverer) Run(looper director.Looper) {
	looper.Loop(func() error {
		data, err := k.getServices()
		if err != nil {
			log.Errorf("Failed to update services: %s, %s", err, string(data))
		}

		data, err = k.getNodes()
		if err != nil {
			log.Errorf("Failed to update nodes: %s, %s", err, string(data))
		}

		return nil
	})
}

func (k *K8sAPIDiscoverer) getServices() ([]byte, error) {
	data, err := k.Command.GetServices()
	if err != nil {
		log.Errorf("Failed to invoke K8s API discovery: %s", err)
		// Don't try to unmarshal output we already know is broken
		return data, err
	}

	k.lock.Lock()
	err = json.Unmarshal(data, k.discoveredSvcs)
	k.lock.Unlock()
	return data, err
}

func (k *K8sAPIDiscoverer) getNodes() ([]byte, error) {
	data, err := k.Command.GetNodes()
	if err != nil {
		log.Errorf("Failed to invoke K8s API discovery: %s", err)
		// Don't try to unmarshal output we already know is broken
		return data, err
	}

	k.lock.Lock()
	err = json.Unmarshal(data, k.discoveredNodes)
	k.lock.Unlock()
	return data, err
}