terraform {
  required_providers {
    coder = {
      source  = "coder/coder"
      version = "0.6.10"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.12.1"
    }
  }
}

variable "use_kubeconfig" {
  type        = bool
  sensitive   = true
  description = <<-EOF
  Use host kubeconfig? (true/false)

  Set this to false if the Coder host is itself running as a Pod on the same
  Kubernetes cluster as you are deploying workspaces to.

  Set this to true if the Coder host is running outside the Kubernetes cluster
  for workspaces. A valid "~/.kube/config" must be present on the Coder host.
  EOF
}

variable "namespace" {
  type        = string
  sensitive   = true
  description = "The namespace to create workspaces in (must exist prior to creating workspaces)"
}

variable "home_disk_size" {
  type        = number
  description = "How large would you like your home volume to be (in GB)?"
  default     = 10
  validation {
    condition     = var.home_disk_size >= 1
    error_message = "Value must be greater than or equal to 1."
  }
}

provider "kubernetes" {
  # Authenticate via ~/.kube/config or a Coder-specific ServiceAccount, depending on admin preferences
  config_path = var.use_kubeconfig == true ? "~/.kube/config" : null
}

data "coder_workspace" "me" {}

resource "coder_agent" "main" {
  os                     = "linux"
  arch                   = "amd64"
  login_before_ready     = false
  startup_script_timeout = 180
  startup_script         = <<-EOT
    set -e

    # home folder can be empty, so copying default bash settings
    if [ ! -f ~/.profile ]; then
      cp /etc/skel/.profile $HOME
    fi
    if [ ! -f ~/.bashrc ]; then
      cp /etc/skel/.bashrc $HOME
    fi

    # install and start code-server
    curl -fsSL https://code-server.dev/install.sh | sh -s -- --version 4.8.3
    code-server --auth none --port 13337 >/tmp/code-server.log 2>&1 &
  EOT
}

# code-server
resource "coder_app" "code-server" {
  agent_id     = coder_agent.main.id
  slug         = "code-server"
  display_name = "code-server"
  icon         = "/icon/code.svg"
  url          = "http://localhost:13337?folder=/home/coder"
  subdomain    = false
  share        = "owner"

  healthcheck {
    url       = "http://localhost:13337/healthz"
    interval  = 3
    threshold = 10
  }
}

resource "kubernetes_persistent_volume_claim" "home" {
  metadata {
    name      = "coder-${lower(data.coder_workspace.me.owner)}-${lower(data.coder_workspace.me.name)}-home"
    namespace = var.namespace
    labels = {
      "app.kubernetes.io/name"     = "coder-pvc"
      "app.kubernetes.io/instance" = "coder-pvc-${lower(data.coder_workspace.me.owner)}-${lower(data.coder_workspace.me.name)}"
      "app.kubernetes.io/part-of"  = "coder"
      // Coder specific labels.
      "com.coder.resource"       = "true"
      "com.coder.workspace.id"   = data.coder_workspace.me.id
      "com.coder.workspace.name" = data.coder_workspace.me.name
      "com.coder.user.id"        = data.coder_workspace.me.owner_id
      "com.coder.user.username"  = data.coder_workspace.me.owner
    }
    annotations = {
      "com.coder.user.email" = data.coder_workspace.me.owner_email
    }
  }
  wait_until_bound = false
  spec {
    access_modes = ["ReadWriteOnce"]
    resources {
      requests = {
        storage = "${var.home_disk_size}Gi"
      }
    }
  }
}

resource "kubernetes_pod" "main" {
  count = data.coder_workspace.me.start_count
  metadata {
    name      = "coder-${lower(data.coder_workspace.me.owner)}-${lower(data.coder_workspace.me.name)}"
    namespace = var.namespace
    labels = {
      "app.kubernetes.io/name"     = "coder-workspace"
      "app.kubernetes.io/instance" = "coder-workspace-${lower(data.coder_workspace.me.owner)}-${lower(data.coder_workspace.me.name)}"
      "app.kubernetes.io/part-of"  = "coder"
      // Coder specific labels.
      "com.coder.resource"       = "true"
      "com.coder.workspace.id"   = data.coder_workspace.me.id
      "com.coder.workspace.name" = data.coder_workspace.me.name
      "com.coder.user.id"        = data.coder_workspace.me.owner_id
      "com.coder.user.username"  = data.coder_workspace.me.owner
    }
    annotations = {
      "com.coder.user.email" = data.coder_workspace.me.owner_email
    }
  }
  spec {
    security_context {
      run_as_user = "1000"
      fs_group    = "1000"
    }
    container {
      name    = "dev"
      image   = "codercom/enterprise-base:ubuntu"
      command = ["sh", "-c", coder_agent.main.init_script]
      security_context {
        run_as_user = "1000"
      }
      env {
        name  = "CODER_AGENT_TOKEN"
        value = coder_agent.main.token
      }
      volume_mount {
        mount_path = "/home/coder"
        name       = "home"
        read_only  = false
      }
    }
    volume {
      name = "home"
      persistent_volume_claim {
        claim_name = kubernetes_persistent_volume_claim.home.metadata.0.name
        read_only  = false
      }
    }
    affinity {
      pod_anti_affinity {
        // This affinity attempts to spread out all workspace pods evenly across
        // nodes.
        preferred_during_scheduling_ignored_during_execution {
          weight = 1
          pod_affinity_term {
            topology_key = "kubernetes.io/hostname"
            label_selector {
              match_expressions {
                key      = "app.kubernetes.io/name"
                operator = "In"
                values   = ["coder-workspace"]
              }
            }
          }
        }
      }
    }
  }
}