diff --git a/cluster/addons/dns/README.md b/cluster/addons/dns/README.md
index d375a67454184..9b55273372741 100644
--- a/cluster/addons/dns/README.md
+++ b/cluster/addons/dns/README.md
@@ -87,7 +87,7 @@ what etcd offers (at least not in the way we use it). For simplicity, we run
 etcd and SkyDNS together in a pod, and we do not try to link etcd instances
 across replicas. A helper container called [kube2sky](kube2sky/) also runs in
 the pod and acts as a bridge between Kubernetes and SkyDNS. It finds the
-Kubernetes master through the `kubernetes-ro` service (via environment
+Kubernetes master through the `kubernetes` service (via environment
 variables), pulls service info from the master, and writes that to etcd for
 SkyDNS to find.
diff --git a/cluster/addons/fluentd-elasticsearch/es-controller.yaml b/cluster/addons/fluentd-elasticsearch/es-controller.yaml
index 3722209c020aa..42cd434d952c0 100644
--- a/cluster/addons/fluentd-elasticsearch/es-controller.yaml
+++ b/cluster/addons/fluentd-elasticsearch/es-controller.yaml
@@ -20,7 +20,7 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:1.3
+      - image: gcr.io/google_containers/elasticsearch:1.4
         name: elasticsearch-logging
         ports:
         - containerPort: 9200
@@ -30,14 +30,8 @@ spec:
           name: es-transport-port
           protocol: TCP
         volumeMounts:
-        - name: token-system-logging
-          mountPath: /etc/token-system-logging
-          readOnly: true
        - name: es-persistent-storage
          mountPath: /data
      volumes:
-      - name: token-system-logging
-        secret:
-          secretName: token-system-logging
       - name: es-persistent-storage
-        emptyDir: {}
\ No newline at end of file
+        emptyDir: {}
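The kube2sky flow described above relies only on the standard service environment variables that the kubelet injects into every container. A minimal sketch (editor's illustration, not code from this patch) of how any container can locate the apiserver through the `kubernetes` service:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// The kubelet injects <NAME>_SERVICE_HOST and <NAME>_SERVICE_PORT for
	// each visible service; with kubernetes-ro removed, only the secure
	// "kubernetes" service remains for reaching the master.
	host := os.Getenv("KUBERNETES_SERVICE_HOST")
	port := os.Getenv("KUBERNETES_SERVICE_PORT")
	if host == "" || port == "" {
		fmt.Println("service env vars not set; not running in a pod?")
		return
	}
	fmt.Printf("apiserver: https://%s:%s\n", host, port)
}
```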
-push: 
+push:
 	gcloud preview docker push gcr.io/google_containers/elasticsearch:$(TAG)
 
 elasticsearch_logging_discovery:
 	go build elasticsearch_logging_discovery.go
+
+clean:
+	rm elasticsearch_logging_discovery
diff --git a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go
index 6b3e63642276f..89e78c1459a17 100644
--- a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go
+++ b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go
@@ -24,14 +24,9 @@ import (
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
 	"github.com/golang/glog"
 )
 
-var (
-	kubeconfig = flag.String("kubeconfig", "/etc/token-system-logging/kubeconfig", "kubeconfig file for access")
-)
-
 func flattenSubsets(subsets []api.EndpointSubset) []string {
 	ips := []string{}
 	for _, ss := range subsets {
@@ -46,17 +41,7 @@ func main() {
 	flag.Parse()
 
 	glog.Info("Kubernetes Elasticsearch logging discovery")
-	settings, err := clientcmd.LoadFromFile(*kubeconfig)
-	if err != nil {
-		glog.Fatalf("Error loading configuration from %s: %v", *kubeconfig, err.Error())
-	}
-
-	config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig()
-	if err != nil {
-		glog.Fatalf("Failed to construct config: %v", err)
-	}
-
-	c, err := client.New(config)
+	c, err := client.NewInCluster()
 	if err != nil {
 		glog.Fatalf("Failed to make client: %v", err)
 	}
diff --git a/contrib/prometheus/README.md b/contrib/prometheus/README.md
index 9edcc8989388b..159f4965742ef 100644
--- a/contrib/prometheus/README.md
+++ b/contrib/prometheus/README.md
@@ -43,9 +43,9 @@ Now, you can access the service `wget 10.0.1.89:9090`, and build graphs.
 
 ## How it works
 
-This is a v1beta3 based, containerized prometheus ReplicationController, which scrapes endpoints which are readable on the KUBERNETES_RO service (the internal kubernetes service running in the default namespace, which is visible to all pods).
+This is a v1beta3-based, containerized prometheus ReplicationController, which scrapes endpoints that are readable on the KUBERNETES service (the internal kubernetes service running in the default namespace, which is visible to all pods).
 
-1. The KUBERNETES_RO service is already running : providing read access to the API metrics.
+1. Use kubectl to handle auth & proxy the kubernetes API locally, emulating the old KUBERNETES_RO service.
 
 1. The list of services to be monitored is passed as command-line arguments in the yaml file.
 
@@ -74,5 +74,7 @@ at port 9090.
 - We should publish this image into the kube/ namespace.
 - Possibly use postgres or mysql as a promdash database.
 - push gateway (https://github.com/prometheus/pushgateway) setup.
+- stop using kubectl to make a local proxy faking the old RO port and build in
+  real auth capabilities.
 
 [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/contrib/prometheus/README.md?pixel)]()
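The discovery binary above now builds its client with `client.NewInCluster()` instead of loading a mounted kubeconfig. A minimal sketch of the same pattern (the service and namespace names are illustrative, and the endpoints accessors are assumed to match this era's client API):

```go
package main

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/golang/glog"
)

func main() {
	// NewInCluster uses the pod's service account token and the
	// KUBERNETES_SERVICE_HOST/PORT env vars, so neither a --kubeconfig
	// flag nor a token-system-logging secret mount is needed.
	c, err := client.NewInCluster()
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}
	endpoints, err := c.Endpoints("default").Get("elasticsearch-logging")
	if err != nil {
		glog.Fatalf("Failed to get endpoints: %v", err)
	}
	glog.Infof("Endpoint subsets: %v", endpoints.Subsets)
}
```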
diff --git a/contrib/prometheus/prometheus-all.json b/contrib/prometheus/prometheus-all.json
index 666c123ee948f..a751b72519a00 100644
--- a/contrib/prometheus/prometheus-all.json
+++ b/contrib/prometheus/prometheus-all.json
@@ -54,12 +54,29 @@
             "protocol": "TCP"
           }
         ],
+        "env": [
+          {
+            "name": "KUBERNETES_RO_SERVICE_HOST",
+            "value": "localhost"
+          },
+          {
+            "name": "KUBERNETES_RO_SERVICE_PORT",
+            "value": "8001"
+          }
+        ],
         "volumeMounts": [
           {
             "mountPath": "/var/prometheus/",
             "name": "data"
           }
         ]
+      },
+      {
+        "name": "kubectl",
+        "image": "gcr.io/google_containers/kubectl:v0.18.0-120-gaeb4ac55ad12b1-dirty",
+        "args": [
+          "proxy", "-p", "8001"
+        ]
       }
     ],
     "volumes": [
diff --git a/docs/getting-started-guides/coreos/azure/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/docs/getting-started-guides/coreos/azure/addons/cluster-monitoring/influxdb/heapster-controller.yaml
index f82d7063b689d..266eb5d2767e3 100644
--- a/docs/getting-started-guides/coreos/azure/addons/cluster-monitoring/influxdb/heapster-controller.yaml
+++ b/docs/getting-started-guides/coreos/azure/addons/cluster-monitoring/influxdb/heapster-controller.yaml
@@ -20,5 +20,5 @@ spec:
         name: heapster
         command:
           - /heapster
-          - --source=kubernetes:http://kubernetes-ro?auth=
+          - --source=kubernetes:http://kubernetes?auth=
           - --sink=influxdb:http://monitoring-influxdb:8086
diff --git a/docs/getting-started-guides/mesos.md b/docs/getting-started-guides/mesos.md
index 7c4067ed1d188..89976871c42f0 100644
--- a/docs/getting-started-guides/mesos.md
+++ b/docs/getting-started-guides/mesos.md
@@ -116,7 +116,6 @@ POD IP CONTAINER(S) IMAGE(S) HOST LABELS
 $ bin/kubectl get services  # your service IPs will likely differ
 NAME            LABELS                                    SELECTOR   IP           PORT
 kubernetes      component=apiserver,provider=kubernetes              10.10.10.2   443
-kubernetes-ro   component=apiserver,provider=kubernetes              10.10.10.1   80
 ```
 Lastly, use the Mesos CLI tool to validate that the Kubernetes scheduler framework is registered and running:
 ```bash
@@ -241,7 +240,6 @@ Next, determine the internal IP address of the front end [service][8]:
 $ bin/kubectl get services
 NAME            LABELS                                    SELECTOR          IP             PORT
 kubernetes      component=apiserver,provider=kubernetes                     10.10.10.2     443
-kubernetes-ro   component=apiserver,provider=kubernetes                     10.10.10.1     80
 redismaster     name=redis-master                                           10.10.10.49    10000
 redisslave      name=redisslave                           name=redisslave   10.10.10.109   10001
 frontend        name=frontend                                               10.10.10.149   9998
diff --git a/examples/kubectl-container/.gitignore b/examples/kubectl-container/.gitignore
new file mode 100644
index 0000000000000..50a4a06fd1d91
--- /dev/null
+++ b/examples/kubectl-container/.gitignore
@@ -0,0 +1,2 @@
+kubectl
+.tag
diff --git a/examples/kubectl-container/Dockerfile b/examples/kubectl-container/Dockerfile
new file mode 100644
index 0000000000000..d27d357364481
--- /dev/null
+++ b/examples/kubectl-container/Dockerfile
@@ -0,0 +1,18 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM scratch
+MAINTAINER Daniel Smith
+ADD kubectl kubectl
+ENTRYPOINT ["/kubectl"]
diff --git a/examples/kubectl-container/Makefile b/examples/kubectl-container/Makefile
new file mode 100644
index 0000000000000..b13b09d2ec403
--- /dev/null
+++ b/examples/kubectl-container/Makefile
@@ -0,0 +1,30 @@
+# Use:
+#
+# `make kubectl` will build kubectl.
+# `make tag` will suggest a tag.
+# `make container` will build a container-- you must supply a tag.
+# `make push` will push the container-- you must supply a tag.
+
+kubectl:
+	KUBE_STATIC_OVERRIDES="kubectl" ../../hack/build-go.sh cmd/kubectl; cp ../../_output/local/bin/linux/amd64/kubectl .
+
+.tag: kubectl
+	./kubectl version -c | grep -o 'GitVersion:"[^"]*"' | cut -f 2 -d '"' > .tag
+
+tag: .tag
+	@echo "Suggest using TAG=$(shell cat .tag)"
+	@echo "$$ make container TAG=$(shell cat .tag)"
+	@echo "or"
+	@echo "$$ make push TAG=$(shell cat .tag)"
+
+container:
+	$(if $(TAG),,$(error TAG is not defined. Use 'make tag' to see a suggestion))
+	docker build -t gcr.io/google_containers/kubectl:$(TAG) .
+
+push: container
+	$(if $(TAG),,$(error TAG is not defined. Use 'make tag' to see a suggestion))
+	gcloud preview docker push gcr.io/google_containers/kubectl:$(TAG)
+
+clean:
+	rm -f kubectl
+	rm -f .tag
diff --git a/examples/kubectl-container/README.md b/examples/kubectl-container/README.md
new file mode 100644
index 0000000000000..1d37732af6af6
--- /dev/null
+++ b/examples/kubectl-container/README.md
@@ -0,0 +1,21 @@
+This directory contains a Dockerfile and Makefile for packaging up kubectl into
+a container.
+
+It's not currently automated as part of a release process, so for the moment
+this is an example of what to do if you want to package kubectl into a
+container/your pod.
+
+In the future, we may release consistently versioned groups of containers when
+we cut a release, in which case the source of gcr.io/google_containers/kubectl
+would become that automated process.
+
+```pod.json``` is provided as an example of packaging kubectl as a sidecar
+container, and to help you verify that kubectl works correctly in
+this configuration.
+
+A possible reason why you would want to do this is to use ```kubectl proxy``` as
+a drop-in replacement for the old no-auth KUBERNETES_RO service. The other
+containers in your pod will find the proxy apparently serving on localhost.
+
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/kubectl-container/README.md?pixel)]()
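To see what "apparently serving on localhost" means in practice, here is a short sketch (editor's illustration, not part of the patch) of a sibling container querying the proxy. It mirrors the busybox wget check in pod.json below and assumes the kubectl sidecar runs `proxy -p 8001`:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
)

func main() {
	// pod.json points these at the kubectl proxy rather than at a real
	// service, so existing KUBERNETES_RO consumers keep working unchanged.
	host := os.Getenv("KUBERNETES_RO_SERVICE_HOST") // "127.0.0.1"
	port := os.Getenv("KUBERNETES_RO_SERVICE_PORT") // "8001"

	resp, err := http.Get(fmt.Sprintf("http://%s:%s/api/v1beta3/pods/", host, port))
	if err != nil {
		fmt.Println("proxy not reachable:", err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```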
diff --git a/examples/kubectl-container/pod.json b/examples/kubectl-container/pod.json
new file mode 100644
index 0000000000000..756090862f201
--- /dev/null
+++ b/examples/kubectl-container/pod.json
@@ -0,0 +1,54 @@
+{
+  "kind": "Pod",
+  "apiVersion": "v1beta3",
+  "metadata": {
+    "name": "kubectl-tester"
+  },
+  "spec": {
+    "containers": [
+      {
+        "name": "bb",
+        "image": "gcr.io/google_containers/busybox",
+        "command": [
+          "sh", "-c", "sleep 5; wget -O - ${KUBERNETES_RO_SERVICE_HOST}:${KUBERNETES_RO_SERVICE_PORT}/api/v1beta3/pods/; sleep 10000"
+        ],
+        "ports": [
+          {
+            "containerPort": 8080,
+            "protocol": "TCP"
+          }
+        ],
+        "env": [
+          {
+            "name": "KUBERNETES_RO_SERVICE_HOST",
+            "value": "127.0.0.1"
+          },
+          {
+            "name": "KUBERNETES_RO_SERVICE_PORT",
+            "value": "8001"
+          }
+        ],
+        "volumeMounts": [
+          {
+            "name": "test-volume",
+            "mountPath": "/mount/test-volume"
+          }
+        ]
+      },
+      {
+        "name": "kubectl",
+        "image": "gcr.io/google_containers/kubectl:v0.18.0-120-gaeb4ac55ad12b1-dirty",
+        "imagePullPolicy": "Always",
+        "args": [
+          "proxy", "-p", "8001"
+        ]
+      }
+    ],
+    "volumes": [
+      {
+        "name": "test-volume",
+        "emptyDir": {}
+      }
+    ]
+  }
+}
diff --git a/examples/logging-demo/README.md b/examples/logging-demo/README.md
index a655e1db30701..c50a77ac3e0b9 100644
--- a/examples/logging-demo/README.md
+++ b/examples/logging-demo/README.md
@@ -104,7 +104,6 @@ elasticsearch-logging   kubernetes.io/cluster-service=true,name=elasticsearch-l
 kibana-logging          kubernetes.io/cluster-service=true,name=kibana-logging   name=kibana-logging   10.0.188.118   5601/TCP
 kube-dns                k8s-app=kube-dns,kubernetes.io/cluster-service=true,name=kube-dns   k8s-app=kube-dns   10.0.0.10   53/UDP
 kubernetes              component=apiserver,provider=kubernetes                   10.0.0.2       443/TCP
-kubernetes-ro           component=apiserver,provider=kubernetes                   10.0.0.1       80/TCP
 monitoring-grafana      kubernetes.io/cluster-service=true,name=grafana           name=influxGrafana   10.0.254.202   80/TCP
 monitoring-heapster     kubernetes.io/cluster-service=true,name=heapster          name=heapster        10.0.19.214    80/TCP
 monitoring-influxdb     name=influxGrafana                                        name=influxGrafana   10.0.198.71    80/TCP
diff --git a/examples/persistent-volumes/README.md b/examples/persistent-volumes/README.md
index 42a1f40c4a7bb..2c2680c91cf02 100644
--- a/examples/persistent-volumes/README.md
+++ b/examples/persistent-volumes/README.md
@@ -89,7 +89,6 @@ cluster/kubectl.sh get services
 
 NAME              LABELS                                    SELECTOR            IP           PORT(S)
 frontendservice                                             name=frontendhttp   10.0.0.241   3000/TCP
 kubernetes        component=apiserver,provider=kubernetes                       10.0.0.2     443/TCP
-kubernetes-ro     component=apiserver,provider=kubernetes                       10.0.0.1     80/TCP
 ```
 
diff --git a/examples/spark/README.md b/examples/spark/README.md
index 15945d55fc050..0572236ba40c4 100644
--- a/examples/spark/README.md
+++ b/examples/spark/README.md
@@ -54,7 +54,6 @@ POD IP CONTAINER(S) IMAGE(S)
 spark-master   192.168.90.14   spark-master   mattf/spark-master   172.18.145.8/172.18.145.8   name=spark-master   Running
 NAME           LABELS                                    SELECTOR            IP               PORT
 kubernetes     component=apiserver,provider=kubernetes                       10.254.0.2       443
-kubernetes-ro   component=apiserver,provider=kubernetes                      10.254.0.1       80
 spark-master   name=spark-master                         name=spark-master   10.254.125.166   7077
 ```
 
@@ -135,7 +134,6 @@ spark-worker-controller-5v48c 192.168.90.17 spark-worker mattf/sp
 spark-worker-controller-ehq23   192.168.35.17   spark-worker   mattf/spark-worker   172.18.145.12/172.18.145.12   name=spark-worker,uses=spark-master   Running
 NAME           LABELS                                    SELECTOR            IP               PORT
 kubernetes     component=apiserver,provider=kubernetes                       10.254.0.2       443
-kubernetes-ro   component=apiserver,provider=kubernetes                      10.254.0.1       80
 spark-master   name=spark-master                         name=spark-master   10.254.125.166   7077
 
 $ sudo docker run -it mattf/spark-base sh
diff --git a/examples/storm/README.md b/examples/storm/README.md
index 71902e0e84e3f..3c93298db5eca 100644
--- a/examples/storm/README.md
+++ b/examples/storm/README.md
@@ -62,7 +62,6 @@ zookeeper 192.168.86.4 zookeeper mattf/zookeeper
 $ kubectl get services
 NAME         LABELS                                    SELECTOR         IP               PORT
 kubernetes   component=apiserver,provider=kubernetes                    10.254.0.2       443
-kubernetes-ro   component=apiserver,provider=kubernetes                 10.254.0.1       80
 zookeeper    name=zookeeper                            name=zookeeper   10.254.139.141   2181
 
 $ echo ruok | nc 10.254.139.141 2181; echo
@@ -97,7 +96,6 @@ Ensure that the Nimbus service is running and functional.
 $ kubectl get services
 NAME         LABELS                                    SELECTOR         IP               PORT
 kubernetes   component=apiserver,provider=kubernetes                    10.254.0.2       443
-kubernetes-ro   component=apiserver,provider=kubernetes                 10.254.0.1       80
 zookeeper    name=zookeeper                            name=zookeeper   10.254.139.141   2181
 nimbus       name=nimbus                               name=nimbus      10.254.115.208   6627
 
diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh
index 6283a559b1a0c..97aff8e394d6f 100644
--- a/hack/lib/golang.sh
+++ b/hack/lib/golang.sh
@@ -99,6 +99,11 @@ readonly KUBE_STATIC_LIBRARIES=(
 kube::golang::is_statically_linked_library() {
   local e
   for e in "${KUBE_STATIC_LIBRARIES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
+  # Allow individual overrides--e.g., so that you can get a static build of
+  # kubectl for inclusion in a container.
+  if [ -n "${KUBE_STATIC_OVERRIDES:+x}" ]; then
+    for e in "${KUBE_STATIC_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
+  fi
   return 1;
 }
diff --git a/pkg/client/clientcmd/client_config.go b/pkg/client/clientcmd/client_config.go
index 21cae3d25b396..8e0d39b91f698 100644
--- a/pkg/client/clientcmd/client_config.go
+++ b/pkg/client/clientcmd/client_config.go
@@ -17,6 +17,7 @@ limitations under the License.
 package clientcmd
 
 import (
+	"fmt"
 	"io"
 	"os"
 
@@ -284,3 +285,32 @@ func (config DirectClientConfig) getCluster() clientcmdapi.Cluster {
 
 	return mergedClusterInfo
 }
+
+// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment.
+type inClusterClientConfig struct{}
+
+func (inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters")
+}
+
+func (inClusterClientConfig) ClientConfig() (*client.Config, error) {
+	return client.InClusterConfig()
+}
+
+func (inClusterClientConfig) Namespace() (string, error) {
+	// TODO: generic way to figure out what namespace you are running in?
+	// This way assumes you've set the POD_NAMESPACE environment variable
+	// using the downward API.
+	if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
+		return ns, nil
+	}
+	return "default", nil
+}
+
+// Possible returns true if loading an in-cluster configuration is possible.
+func (inClusterClientConfig) Possible() bool {
+	fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
+	return os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
+		os.Getenv("KUBERNETES_SERVICE_PORT") != "" &&
+		err == nil && !fi.IsDir()
+}
diff --git a/pkg/client/clientcmd/merged_client_builder.go b/pkg/client/clientcmd/merged_client_builder.go
index 49a3b73954fb7..b7f75b29bd374 100644
--- a/pkg/client/clientcmd/merged_client_builder.go
+++ b/pkg/client/clientcmd/merged_client_builder.go
@@ -45,6 +45,11 @@ func NewInteractiveDeferredLoadingClientConfig(loadingRules *ClientConfigLoading
 }
 
 func (config DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) {
+	// Are we running in a cluster? If so, use that.
+	icc := inClusterClientConfig{}
+	if icc.Possible() {
+		return icc, nil
+	}
 	mergedConfig, err := config.loadingRules.Load()
 	if err != nil {
 		return nil, err
diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go
index 9ccde2d9d2c9c..c598d21d2414a 100644
--- a/pkg/kubectl/cmd/util/factory.go
+++ b/pkg/kubectl/cmd/util/factory.go
@@ -290,7 +290,7 @@ func (c *clientSwaggerSchema) ValidateBytes(data []byte) error {
 // 1. CommandLineLocation - this is parsed from the command line, so it must be late bound. If you specify this,
 //    then no other kubeconfig files are merged. This file must exist.
 // 2. If $KUBECONFIG is set, then it is treated as a list of files that should be merged.
-// 3. HomeDirectoryLocation
+// 3. HomeDirectoryLocation 
 // Empty filenames are ignored. Files with non-deserializable content produce errors.
 // The first file to set a particular value or map key wins and the value or map key is never changed.
 // This means that the first file to set CurrentContext will have its context preserved. It also means
@@ -316,6 +316,13 @@ func (c *clientSwaggerSchema) ValidateBytes(data []byte) error {
 //      2. If the command line does not specify one, and the auth info has conflicting techniques, fail.
 //      3. If the command line specifies one and the auth info specifies another, honor the command line technique.
 //   2. Use default values and potentially prompt for auth information
+//
+// However, if it appears that we're running in a kubernetes cluster
+// container environment, then run with the auth info that kubernetes has
+// mounted for us. Specifically:
+//   The env vars KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT are
+//   set, and the file /var/run/secrets/kubernetes.io/serviceaccount/token
+//   exists and is not a directory.
 func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig {
 	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
 	flags.StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.")
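The pieces above combine so that a binary built against this client runs unmodified inside or outside a pod. A short sketch (editor's illustration, not part of the patch) of the in-cluster half, using the calls referenced above; the printed output is illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/golang/glog"
)

func main() {
	// InClusterConfig is what inClusterClientConfig.ClientConfig() returns:
	// it is built from the mounted service account token plus the
	// KUBERNETES_SERVICE_HOST/PORT variables that Possible() checks.
	config, err := client.InClusterConfig()
	if err != nil {
		glog.Fatalf("Not running inside a cluster: %v", err)
	}

	// Mirror inClusterClientConfig.Namespace(): POD_NAMESPACE is expected
	// to be set via the downward API; otherwise fall back to "default".
	ns := os.Getenv("POD_NAMESPACE")
	if ns == "" {
		ns = "default"
	}
	fmt.Printf("apiserver %s, namespace %q\n", config.Host, ns)
}
```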
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index e30e527a16c11..a3822572191e4 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -872,7 +872,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *api.Pod, container *api.Cont
 	return opts, nil
 }
 
-var masterServices = util.NewStringSet("kubernetes", "kubernetes-ro")
+var masterServices = util.NewStringSet("kubernetes")
 
 // getServiceEnvVarMap makes a map[string]string of env vars for services a pod in namespace ns should see
 func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
@@ -909,8 +909,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
 			serviceMap[serviceName] = service
 		case kl.masterServiceNamespace:
 			if masterServices.Has(serviceName) {
-				_, exists := serviceMap[serviceName]
-				if !exists {
+				if _, exists := serviceMap[serviceName]; !exists {
 					serviceMap[serviceName] = service
 				}
 			}
diff --git a/pkg/master/controller.go b/pkg/master/controller.go
index 96099057a2dec..c32845d502ff0 100644
--- a/pkg/master/controller.go
+++ b/pkg/master/controller.go
@@ -35,7 +35,7 @@ import (
 )
 
 // Controller is the controller manager for the core bootstrap Kubernetes controller
-// loops, which manage creating the "kubernetes" and "kubernetes-ro" services, the "default"
+// loops, which manage creating the "kubernetes" service, the "default"
 // namespace, and provide the IP repair check on service IPs
 type Controller struct {
 	NamespaceRegistry namespace.Registry