from pulumi import Config, export, get_project, get_stack, Output, ResourceOptions
from pulumi_gcp.config import project, zone
from pulumi_gcp.container import Cluster, ClusterNodeConfigArgs
from pulumi_kubernetes import Provider
from pulumi_kubernetes.apps.v1 import Deployment, DeploymentSpecArgs
from pulumi_kubernetes.core.v1 import ContainerArgs, PodSpecArgs, PodTemplateSpecArgs, Service, ServicePortArgs, ServiceSpecArgs
from pulumi_kubernetes.meta.v1 import LabelSelectorArgs, ObjectMetaArgs
from pulumi_random import RandomPassword
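
# These imports assume the pulumi, pulumi-gcp, pulumi-kubernetes, and pulumi-random packages are
# installed in the active environment (e.g. pip install pulumi pulumi-gcp pulumi-kubernetes pulumi-random).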

# Read in some configurable settings for our cluster:
config = Config()

# node_count is the number of cluster nodes to provision. Defaults to 3 if unspecified.
NODE_COUNT = config.get_int('node_count') or 3

# node_machine_type is the machine type to use for cluster nodes. Defaults to n1-standard-1 if unspecified.
# See https://cloud.google.com/compute/docs/machine-types for more details on available machine types.
NODE_MACHINE_TYPE = config.get('node_machine_type') or 'n1-standard-1'

# username is the admin username for the cluster.
USERNAME = config.get('username') or 'admin'

# password is the password for the admin user in the cluster. A random one is generated if unspecified.
PASSWORD = config.get_secret('password') or RandomPassword("password", length=20, special=True).result

# master_version is the GKE master version to use (optional; GKE picks a default if unspecified).
MASTER_VERSION = config.get('master_version')
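
# Each of these settings can be overridden per stack with the Pulumi CLI, for example:
#   pulumi config set node_count 5
#   pulumi config set node_machine_type n1-standard-2
#   pulumi config set --secret password <your-password>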

# Now, actually create the GKE cluster.
k8s_cluster = Cluster('gke-cluster',
    initial_node_count=NODE_COUNT,
    node_version=MASTER_VERSION,
    min_master_version=MASTER_VERSION,
    node_config=ClusterNodeConfigArgs(
        machine_type=NODE_MACHINE_TYPE,
        oauth_scopes=[
            'https://www.googleapis.com/auth/compute',
            'https://www.googleapis.com/auth/devstorage.read_only',
            'https://www.googleapis.com/auth/logging.write',
            'https://www.googleapis.com/auth/monitoring',
        ],
    ),
)
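
# Note: GKE does not let nodes run a newer version than the master, which is presumably why
# node_version and min_master_version are pinned to the same value above.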

# Manufacture a GKE-style kubeconfig. Note that this is slightly "different" because of the way GKE requires
# gcloud to be in the picture for cluster authentication (rather than using the client cert/key directly).
k8s_info = Output.all(k8s_cluster.name, k8s_cluster.endpoint, k8s_cluster.master_auth)
k8s_config = k8s_info.apply(
    lambda info: """apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: {0}
    server: https://{1}
  name: {2}
contexts:
- context:
    cluster: {2}
    user: {2}
  name: {2}
current-context: {2}
kind: Config
preferences: {{}}
users:
- name: {2}
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      command: gke-gcloud-auth-plugin
      installHint: Install gke-gcloud-auth-plugin for use with kubectl by following
        https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke
      provideClusterInfo: true
""".format(info[2]['cluster_ca_certificate'], info[1], '{0}_{1}_{2}'.format(project, zone, info[0])))
# Make a Kubernetes provider instance that uses our cluster from above.
k8s_provider = Provider('gke_k8s', kubeconfig=k8s_config)
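# Resources below pass opts=ResourceOptions(provider=k8s_provider) so they are deployed into the new
# GKE cluster rather than whatever kubeconfig is ambient on the machine running Pulumi.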

# Create a canary deployment to test that this cluster works.
labels = {'app': 'canary-{0}-{1}'.format(get_project(), get_stack())}
canary = Deployment('canary',
    spec=DeploymentSpecArgs(
        selector=LabelSelectorArgs(match_labels=labels),
        replicas=1,
        template=PodTemplateSpecArgs(
            metadata=ObjectMetaArgs(labels=labels),
            spec=PodSpecArgs(containers=[ContainerArgs(name='nginx', image='nginx')]),
        ),
    ),
    opts=ResourceOptions(provider=k8s_provider),
)

ingress = Service('ingress',
    spec=ServiceSpecArgs(
        type='LoadBalancer',
        selector=labels,
        ports=[ServicePortArgs(port=80)],
    ),
    opts=ResourceOptions(provider=k8s_provider),
)
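
# A Service of type LoadBalancer provisions a Google Cloud network load balancer in front of the canary;
# its public IP appears under status.load_balancer.ingress once provisioning completes.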

# Finally, export the kubeconfig so that the client can easily access the cluster.
export('kubeconfig', k8s_config)

# Export the k8s ingress IP to access the canary deployment.
export('ingress_ip', ingress.status.apply(lambda status: status.load_balancer.ingress[0].ip))
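
# After `pulumi up` completes, the exported outputs can be used to reach the cluster, for example:
#   pulumi stack output kubeconfig > kubeconfig.yaml
#   KUBECONFIG=./kubeconfig.yaml kubectl get pods
#   curl http://$(pulumi stack output ingress_ip)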