Skip to content

Commit

Permalink
Merge pull request kubernetes#9211 from lavalamp/no-ro-nonbreaking
Browse files Browse the repository at this point in the history
Nonbreaking pieces of kubernetes#8155
  • Loading branch information
bgrant0607 committed Jun 3, 2015
2 parents a8a3e9d + 40eb159 commit f8bf996
Show file tree
Hide file tree
Showing 23 changed files with 209 additions and 44 deletions.
2 changes: 1 addition & 1 deletion cluster/addons/dns/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ what etcd offers (at least not in the way we use it). For simplicity, we run
etcd and SkyDNS together in a pod, and we do not try to link etcd instances
across replicas. A helper container called [kube2sky](kube2sky/) also runs in
the pod and acts as a bridge between Kubernetes and SkyDNS. It finds the
Kubernetes master through the `kubernetes-ro` service (via environment
Kubernetes master through the `kubernetes` service (via environment
variables), pulls service info from the master, and writes that to etcd for
SkyDNS to find.

Expand Down
10 changes: 2 additions & 8 deletions cluster/addons/fluentd-elasticsearch/es-controller.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ spec:
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/elasticsearch:1.3
- image: gcr.io/google_containers/elasticsearch:1.4
name: elasticsearch-logging
ports:
- containerPort: 9200
Expand All @@ -30,14 +30,8 @@ spec:
name: es-transport-port
protocol: TCP
volumeMounts:
- name: token-system-logging
mountPath: /etc/token-system-logging
readOnly: true
- name: es-persistent-storage
mountPath: /data
volumes:
- name: token-system-logging
secret:
secretName: token-system-logging
- name: es-persistent-storage
emptyDir: {}
emptyDir: {}
10 changes: 7 additions & 3 deletions cluster/addons/fluentd-elasticsearch/es-image/Makefile
Original file line number Diff line number Diff line change
@@ -1,12 +1,16 @@
.PHONY: elasticsearch_logging_discovery build push

TAG = 1.3
# Keep this one version ahead to help prevent accidental pushes.
TAG = 1.4

build: elasticsearch_logging_discovery
build: elasticsearch_logging_discovery
docker build -t gcr.io/google_containers/elasticsearch:$(TAG) .

push:
push:
gcloud preview docker push gcr.io/google_containers/elasticsearch:$(TAG)

elasticsearch_logging_discovery:
go build elasticsearch_logging_discovery.go

clean:
rm elasticsearch_logging_discovery
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,9 @@ import (

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
"github.com/golang/glog"
)

var (
kubeconfig = flag.String("kubeconfig", "/etc/token-system-logging/kubeconfig", "kubeconfig file for access")
)

func flattenSubsets(subsets []api.EndpointSubset) []string {
ips := []string{}
for _, ss := range subsets {
Expand All @@ -46,17 +41,7 @@ func main() {
flag.Parse()
glog.Info("Kubernetes Elasticsearch logging discovery")

settings, err := clientcmd.LoadFromFile(*kubeconfig)
if err != nil {
glog.Fatalf("Error loading configuration from %s: %v", *kubeconfig, err.Error())
}

config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig()
if err != nil {
glog.Fatalf("Failed to construct config: %v", err)
}

c, err := client.New(config)
c, err := client.NewInCluster()
if err != nil {
glog.Fatalf("Failed to make client: %v", err)
}
Expand Down
6 changes: 4 additions & 2 deletions contrib/prometheus/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -43,9 +43,9 @@ Now, you can access the service `wget 10.0.1.89:9090`, and build graphs.

## How it works

This is a v1beta3 based, containerized prometheus ReplicationController, which scrapes endpoints which are readable on the KUBERNETES_RO service (the internal kubernetes service running in the default namespace, which is visible to all pods).
This is a v1beta3 based, containerized prometheus ReplicationController, which scrapes endpoints which are readable on the KUBERNETES service (the internal kubernetes service running in the default namespace, which is visible to all pods).

1. The KUBERNETES_RO service is already running : providing read access to the API metrics.
1. Use kubectl to handle auth & proxy the kubernetes API locally, emulating the old KUBERNETES_RO service.

1. The list of services to be monitored is passed as command line arguments in
the yaml file.
Expand Down Expand Up @@ -74,5 +74,7 @@ at port 9090.
- We should publish this image into the kube/ namespace.
- Possibly use Postgres or MySQL as a promdash database.
- push gateway (https://github.com/prometheus/pushgateway) setup.
- stop using kubectl to make a local proxy faking the old RO port and build in
real auth capabilities.

[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/contrib/prometheus/README.md?pixel)]()
17 changes: 17 additions & 0 deletions contrib/prometheus/prometheus-all.json
Original file line number Diff line number Diff line change
Expand Up @@ -54,12 +54,29 @@
"protocol": "TCP"
}
],
"env": [
{
"name": "KUBERNETES_RO_SERVICE_HOST",
"value": "localhost"
},
{
"name": "KUBERNETES_RO_SERVICE_PORT",
"value": "8001"
}
],
"volumeMounts": [
{
"mountPath": "/var/prometheus/",
"name": "data"
}
]
},
{
"name": "kubectl",
"image": "gcr.io/google_containers/kubectl:v0.18.0-120-gaeb4ac55ad12b1-dirty",
"args": [
"proxy", "-p", "8001"
]
}
],
"volumes": [
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,5 +20,5 @@ spec:
name: heapster
command:
- /heapster
- --source=kubernetes:http://kubernetes-ro?auth=
- --source=kubernetes:http://kubernetes?auth=
- --sink=influxdb:http://monitoring-influxdb:8086
2 changes: 0 additions & 2 deletions docs/getting-started-guides/mesos.md
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,6 @@ POD IP CONTAINER(S) IMAGE(S) HOST LABELS
$ bin/kubectl get services # your service IPs will likely differ
NAME LABELS SELECTOR IP PORT
kubernetes component=apiserver,provider=kubernetes <none> 10.10.10.2 443
kubernetes-ro component=apiserver,provider=kubernetes <none> 10.10.10.1 80
```
Lastly, use the Mesos CLI tool to validate that the Kubernetes scheduler framework has been registered and is running:
```bash
Expand Down Expand Up @@ -241,7 +240,6 @@ Next, determine the internal IP address of the front end [service][8]:
$ bin/kubectl get services
NAME LABELS SELECTOR IP PORT
kubernetes component=apiserver,provider=kubernetes <none> 10.10.10.2 443
kubernetes-ro component=apiserver,provider=kubernetes <none> 10.10.10.1 80
redismaster <none> name=redis-master 10.10.10.49 10000
redisslave name=redisslave name=redisslave 10.10.10.109 10001
frontend <none> name=frontend 10.10.10.149 9998
Expand Down
2 changes: 2 additions & 0 deletions examples/kubectl-container/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
kubectl
.tag
18 changes: 18 additions & 0 deletions examples/kubectl-container/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Minimal image containing only the kubectl binary. Starting from scratch
# means there is no libc in the image, so the binary must be fully static
# (the adjacent Makefile builds it with KUBE_STATIC_OVERRIDES="kubectl").
FROM scratch
MAINTAINER Daniel Smith <[email protected]>
# The kubectl binary is expected in the build context (see Makefile).
ADD kubectl kubectl
ENTRYPOINT ["/kubectl"]
30 changes: 30 additions & 0 deletions examples/kubectl-container/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# Use:
#
# `make kubectl` will build kubectl.
# `make tag` will suggest a tag.
# `make container` will build a container-- you must supply a tag.
# `make push` will push the container-- you must supply a tag.

# These targets never produce files of the same name; declare them phony so
# a stray file named e.g. "tag" or "push" cannot mask them.
.PHONY: tag container push clean

# Build a statically linked kubectl and copy it into the build context.
kubectl:
	KUBE_STATIC_OVERRIDES="kubectl" ../../hack/build-go.sh cmd/kubectl; cp ../../_output/local/bin/linux/amd64/kubectl .

# Derive a version tag from the freshly built binary.
.tag: kubectl
	./kubectl version -c | grep -o 'GitVersion:"[^"]*"' | cut -f 2 -d '"' > .tag

tag: .tag
	@echo "Suggest using TAG=$(shell cat .tag)"
	@echo "$$ make container TAG=$(shell cat .tag)"
	@echo "or"
	@echo "$$ make push TAG=$(shell cat .tag)"

container:
	$(if $(TAG),,$(error TAG is not defined. Use 'make tag' to see a suggestion))
	docker build -t gcr.io/google_containers/kubectl:$(TAG) .

push: container
	$(if $(TAG),,$(error TAG is not defined. Use 'make tag' to see a suggestion))
	gcloud preview docker push gcr.io/google_containers/kubectl:$(TAG)

clean:
	rm -f kubectl
	rm -f .tag
21 changes: 21 additions & 0 deletions examples/kubectl-container/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
This directory contains a Dockerfile and Makefile for packaging up kubectl into
a container.

It's not currently automated as part of a release process, so for the moment
this is an example of what to do if you want to package kubectl into a
container/your pod.

In the future, we may release consistently versioned groups of containers when
we cut a release, in which case the source of gcr.io/google_containers/kubectl
would become that automated process.

```pod.json``` is provided as an example of packaging kubectl as a sidecar
container, and to help you verify that kubectl works correctly in
this configuration.

A possible reason why you would want to do this is to use ```kubectl proxy``` as
a drop-in replacement for the old no-auth KUBERNETES_RO service. The other
containers in your pod will find the proxy apparently serving on localhost.


[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/kubectl-container/README.md?pixel)]()
54 changes: 54 additions & 0 deletions examples/kubectl-container/pod.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
{
"kind": "Pod",
"apiVersion": "v1beta3",
"metadata": {
"name": "kubectl-tester"
},
"spec": {
"containers": [
{
"name": "bb",
"image": "gcr.io/google_containers/busybox",
"command": [
"sh", "-c", "sleep 5; wget -O - ${KUBERNETES_RO_SERVICE_HOST}:${KUBERNETES_RO_SERVICE_PORT}/api/v1beta3/pods/; sleep 10000"
],
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"env": [
{
"name": "KUBERNETES_RO_SERVICE_HOST",
"value": "127.0.0.1"
},
{
"name": "KUBERNETES_RO_SERVICE_PORT",
"value": "8001"
}
],
"volumeMounts": [
{
"name": "test-volume",
"mountPath": "/mount/test-volume"
}
]
},
{
"name": "kubectl",
"image": "gcr.io/google_containers/kubectl:v0.18.0-120-gaeb4ac55ad12b1-dirty",
"imagePullPolicy": "Always",
"args": [
"proxy", "-p", "8001"
]
}
],
"volumes": [
{
"name": "test-volume",
"emptyDir": {}
}
]
}
}
1 change: 0 additions & 1 deletion examples/logging-demo/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,6 @@ elasticsearch-logging kubernetes.io/cluster-service=true,name=elasticsearch-l
kibana-logging kubernetes.io/cluster-service=true,name=kibana-logging name=kibana-logging 10.0.188.118 5601/TCP
kube-dns k8s-app=kube-dns,kubernetes.io/cluster-service=true,name=kube-dns k8s-app=kube-dns 10.0.0.10 53/UDP
kubernetes component=apiserver,provider=kubernetes <none> 10.0.0.2 443/TCP
kubernetes-ro component=apiserver,provider=kubernetes <none> 10.0.0.1 80/TCP
monitoring-grafana kubernetes.io/cluster-service=true,name=grafana name=influxGrafana 10.0.254.202 80/TCP
monitoring-heapster kubernetes.io/cluster-service=true,name=heapster name=heapster 10.0.19.214 80/TCP
monitoring-influxdb name=influxGrafana name=influxGrafana 10.0.198.71 80/TCP
Expand Down
1 change: 0 additions & 1 deletion examples/persistent-volumes/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,6 @@ cluster/kubectl.sh get services
NAME LABELS SELECTOR IP PORT(S)
frontendservice <none> name=frontendhttp 10.0.0.241 3000/TCP
kubernetes component=apiserver,provider=kubernetes <none> 10.0.0.2 443/TCP
kubernetes-ro component=apiserver,provider=kubernetes <none> 10.0.0.1 80/TCP
```
Expand Down
2 changes: 0 additions & 2 deletions examples/spark/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,6 @@ POD IP CONTAINER(S) IMAGE(S)
spark-master 192.168.90.14 spark-master mattf/spark-master 172.18.145.8/172.18.145.8 name=spark-master Running
NAME LABELS SELECTOR IP PORT
kubernetes component=apiserver,provider=kubernetes <none> 10.254.0.2 443
kubernetes-ro component=apiserver,provider=kubernetes <none> 10.254.0.1 80
spark-master name=spark-master name=spark-master 10.254.125.166 7077
```

Expand Down Expand Up @@ -135,7 +134,6 @@ spark-worker-controller-5v48c 192.168.90.17 spark-worker mattf/sp
spark-worker-controller-ehq23 192.168.35.17 spark-worker mattf/spark-worker 172.18.145.12/172.18.145.12 name=spark-worker,uses=spark-master Running
NAME LABELS SELECTOR IP PORT
kubernetes component=apiserver,provider=kubernetes <none> 10.254.0.2 443
kubernetes-ro component=apiserver,provider=kubernetes <none> 10.254.0.1 80
spark-master name=spark-master name=spark-master 10.254.125.166 7077

$ sudo docker run -it mattf/spark-base sh
Expand Down
2 changes: 0 additions & 2 deletions examples/storm/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,6 @@ zookeeper 192.168.86.4 zookeeper mattf/zookeeper
$ kubectl get services
NAME LABELS SELECTOR IP PORT
kubernetes component=apiserver,provider=kubernetes <none> 10.254.0.2 443
kubernetes-ro component=apiserver,provider=kubernetes <none> 10.254.0.1 80
zookeeper name=zookeeper name=zookeeper 10.254.139.141 2181

$ echo ruok | nc 10.254.139.141 2181; echo
Expand Down Expand Up @@ -97,7 +96,6 @@ Ensure that the Nimbus service is running and functional.
$ kubectl get services
NAME LABELS SELECTOR IP PORT
kubernetes component=apiserver,provider=kubernetes <none> 10.254.0.2 443
kubernetes-ro component=apiserver,provider=kubernetes <none> 10.254.0.1 80
zookeeper name=zookeeper name=zookeeper 10.254.139.141 2181
nimbus name=nimbus name=nimbus 10.254.115.208 6627

Expand Down
5 changes: 5 additions & 0 deletions hack/lib/golang.sh
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,11 @@ readonly KUBE_STATIC_LIBRARIES=(
# Returns 0 (true) when the target named by $1 should be statically linked.
# $1 is matched on its "/<name>" suffix, first against the fixed
# KUBE_STATIC_LIBRARIES list, then against the caller-supplied
# KUBE_STATIC_OVERRIDES array. Returns 1 otherwise.
kube::golang::is_statically_linked_library() {
  local e
  for e in "${KUBE_STATIC_LIBRARIES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
  # Allow individual overrides--e.g., so that you can get a static build of
  # kubectl for inclusion in a container.
  # (":+x" expands to "x" only when KUBE_STATIC_OVERRIDES is set and non-empty,
  # so the -n test is safe even when the variable is entirely unset.)
  if [ -n "${KUBE_STATIC_OVERRIDES:+x}" ]; then
    for e in "${KUBE_STATIC_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
  fi
  return 1;
}

Expand Down
30 changes: 30 additions & 0 deletions pkg/client/clientcmd/client_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ limitations under the License.
package clientcmd

import (
"fmt"
"io"
"os"

Expand Down Expand Up @@ -284,3 +285,32 @@ func (config DirectClientConfig) getCluster() clientcmdapi.Cluster {

return mergedClusterInfo
}

// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment.
type inClusterClientConfig struct{}

// RawConfig is unsupported for the in-cluster case: there is only the one
// implicit cluster, so no multi-cluster kubeconfig structure exists to return.
func (inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) {
	var empty clientcmdapi.Config
	return empty, fmt.Errorf("inCluster environment config doesn't support multiple clusters")
}

// ClientConfig builds a client.Config for the current in-cluster
// environment by delegating to client.InClusterConfig.
func (inClusterClientConfig) ClientConfig() (*client.Config, error) {
	return client.InClusterConfig()
}

// Namespace reports the namespace this process runs in. It reads the
// POD_NAMESPACE environment variable (which a pod can populate via the
// downward API) and falls back to "default" when that is unset or empty.
// TODO: generic way to figure out what namespace you are running in?
func (inClusterClientConfig) Namespace() (string, error) {
	ns := os.Getenv("POD_NAMESPACE")
	if ns == "" {
		ns = "default"
	}
	return ns, nil
}

// Possible reports whether an in-cluster client configuration can be
// loaded: both kubernetes service environment variables must be set and
// the service-account token must exist as a regular file.
func (inClusterClientConfig) Possible() bool {
	if os.Getenv("KUBERNETES_SERVICE_HOST") == "" || os.Getenv("KUBERNETES_SERVICE_PORT") == "" {
		return false
	}
	fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
	return err == nil && !fi.IsDir()
}
Loading

0 comments on commit f8bf996

Please sign in to comment.