Issue
You need to configure template-level settings that aren't accessible directly through the workspace's main.tf
file, such as port sharing permissions.
Overview
Some configuration options in Coder operate at the template level rather than the workspace level. For example, while you can set the share
parameter in a workspace, the maximum sharing level allowed (owner/authenticated/public) is controlled by the template's max_port_share_level
setting.
These template-level guardrails cannot be modified through the standard workspace Terraform resources, requiring a different approach.
Solution
You can configure template-level settings by using Terraform with the coderd
provider outside the Coder UI. This allows you to programmatically manage template configurations.
Step 1: Pull or Create a Template Configuration
Start by either pulling an existing template or creating a new configuration file:
coder template pull $TEMPLATE_NAME
Step 2: Create a Template-Level Terraform Configuration
Create a main.tf
file with the following structure:
# Require the coderd provider, which manages Coder resources (templates,
# users, ACLs) via the Coder API — distinct from the in-workspace
# "coder/coder" provider used inside workspace templates.
terraform {
required_providers {
coderd = {
source = "coder/coderd"
}
}
}
# Authenticates the coderd provider against your Coder deployment.
# NOTE: provider argument names are lowercase — `URL` is not a valid
# argument and fails `terraform validate`; the correct name is `url`.
provider "coderd" {
  url   = "https://$FQDN"  # Access URL of the Coder deployment
  token = "$CODER_TOKEN"   # API/session token of a user with admin privileges
}
# Look up the existing template so its current state can be inspected.
data "coderd_template" "windows_main" {
name = "$TEMPLATE_NAME"
}
# Emit the full template object as JSON — useful for verifying current
# settings (including max_port_share_level) before and after apply.
output "template" {
value = "${jsonencode(data.coderd_template.windows_main)}"
}
# Look up the user that will be granted the "admin" role in the ACL below.
data "coderd_user" "coder" {
# This must be a valid Coder user account
username = "$CODER_USER"
}
# Manages the template itself, including the template-level guardrail
# `max_port_share_level`, which caps how widely workspace ports may be
# shared regardless of what individual workspaces request.
resource "coderd_template" "ubuntu-main" {
  name = "$TEMPLATE_NAME"

  # Must be a SINGLE value, one of: "owner" (most restrictive),
  # "authenticated", or "public". The original pipe-separated string
  # "public|sharing|owner" is not a valid value ("sharing" is not a level).
  max_port_share_level = "authenticated"

  description = "$DESCRIPTION, settings for default sharing on ports"

  versions = [
    {
      name   = "stable"
      active = true # boolean attribute — use a bool, not the string "true"
      tf_vars = [
        {
          name  = "namespace"
          value = "coder"
        }
      ]
      description = "The stable version of the template."
      # Directory containing the workspace-level main.tf (Step 3).
      directory = "./stable-template"
    }
  ]

  # Grant template admin rights to the user looked up above.
  acl = {
    users = [{
      id   = data.coderd_user.coder.id
      role = "admin"
    }]
    groups = []
  }
}
Step 3: Create Your Workspace Template
Create a directory called ./stable-template
and add your workspace main.tf
file there. This file should contain your normal workspace configuration, including resources, agents, and applications.
Within this file, you can include a parameter to allow users to select their preferred sharing level (within the constraints set by the template).
# Providers required by the workspace template itself: "coder" for agents,
# apps, and parameters; "kubernetes" for the Pod/PVC resources below.
terraform {
required_providers {
coder = {
source = "coder/coder"
}
kubernetes = {
source = "hashicorp/kubernetes"
}
coderd = {
source = "coder/coderd"
}
}
}
# JetBrains Gateway module: lets users open this workspace in a JetBrains IDE.
# count is tied to start_count so the module only runs for started workspaces.
module "jetbrains_gateway" {
count = data.coder_workspace.me.start_count
source = "registry.coder.com/modules/jetbrains-gateway/coder"
version = "1.0.28"
agent_id = coder_agent.main.id
folder = "/home/coder"
jetbrains_ides = ["CL", "GO", "IU", "PY", "WS"]
default = "GO"
}
# File Browser module: provides a web UI for browsing the workspace filesystem.
module "filebrowser" {
count = data.coder_workspace.me.start_count
source = "registry.coder.com/modules/filebrowser/coder"
version = "1.0.23"
agent_id = coder_agent.main.id
}
# Workspace-level parameter letting users choose a port sharing level.
# The effective level is still capped by the template's max_port_share_level
# (set in the template-level configuration from Step 2).
data "coder_parameter" "port_share_level" {
  name         = "port_share_level"
  display_name = "Port Share Level"
  description  = "How widely workspace ports can be shared"
  type         = "string"
  default      = "owner"
  icon         = "/emojis/1f510.png"
  mutable      = true

  option {
    name  = "Owner only"
    value = "owner"
    icon  = "/emojis/1f510.png"
  }
  option {
    name  = "Authenticated users"
    value = "authenticated"
    # Fixed garbled icon path: "/emojis/1f users.png" is not a valid emoji
    # path; 1f465 ("busts in silhouette") matches the intent here.
    icon  = "/emojis/1f465.png"
  }
  option {
    name  = "Public"
    value = "public"
    icon  = "/emojis/1f30e.png"
  }

  # Reject anything outside the three recognized levels.
  validation {
    regex = "^(owner|authenticated|public)$"
    error = "Value must be one of: owner, authenticated, or public."
  }
}
# In-workspace Coder provider; configured automatically by the provisioner.
provider "coder" {}
# Whether to authenticate to Kubernetes using the host's kubeconfig file.
# NOTE: Terraform heredoc syntax is `<<-EOF` — the original `>>-EOF` does
# not parse.
variable "use_kubeconfig" {
  type    = bool
  default = false

  description = <<-EOF
  Use host kubeconfig? (true/false)

  Set this to false if the Coder host is itself running as a Pod on the same
  Kubernetes cluster as you are deploying workspaces to.

  Set this to true if the Coder host is running outside the Kubernetes cluster
  for workspaces. A valid "~/.kube/config" must be present on the Coder host.
  EOF
}
# Target namespace for workspace Pods and PVCs; must exist before apply.
variable "namespace" {
type = string
description = "The Kubernetes namespace to create workspaces in (must exist prior to creating workspaces). If the Coder host is itself running as a Pod on the same Kubernetes cluster as you are deploying workspaces to, set this to the same namespace."
}
# User-selectable CPU core count; consumed by the container resource limits.
data "coder_parameter" "cpu" {
name = "cpu"
display_name = "CPU"
description = "The number of CPU cores"
default = "2"
icon = "/icon/memory.svg"
mutable = true
option {
name = "2 Cores"
value = "2"
}
option {
name = "8 Cores"
value = "8"
}
}
# User-selectable memory size (GB); consumed by the container resource limits.
data "coder_parameter" "memory" {
name = "memory"
display_name = "Memory"
description = "The amount of memory in GB"
default = "2"
icon = "/icon/memory.svg"
mutable = true
option {
name = "2 GB"
value = "2"
}
option {
name = "8 GB"
value = "8"
}
}
# Size of the persistent home volume (GB). Immutable after creation because
# the PVC cannot be resized through this template.
data "coder_parameter" "home_disk_size" {
name = "home_disk_size"
display_name = "Home disk size"
description = "The size of the home disk in GB"
default = "10"
type = "number"
icon = "/emojis/1f4be.png"
mutable = false
validation {
min = 1
max = 99999
}
}
# Use the host kubeconfig when requested; otherwise fall back to in-cluster
# service-account authentication (config_path = null).
provider "kubernetes" {
config_path = var.use_kubeconfig == true ? "~/.kube/config" : null
}
# Metadata about the current workspace and its owner, injected by Coder.
data "coder_workspace" "me" {}
data "coder_workspace_owner" "me" {}
# Agent that runs inside the workspace Pod: installs and starts code-server
# on boot and reports resource-usage metadata to the Coder dashboard.
# NOTE: heredoc syntax is `<<-EOT` — the original `>>-EOT` does not parse —
# and the code-server log redirection must use output redirection
# (`>/tmp/code-server.log 2>&1`); the original `</tmp/...` / `2<&1` are
# input redirections and would break the startup script.
resource "coder_agent" "main" {
  os   = "linux"
  arch = "amd64"

  startup_script = <<-EOT
    set -e
    curl -fsSL https://code-server.dev/install.sh | sh -s -- --method=standalone --prefix=/tmp/code-server
    /tmp/code-server/bin/code-server --install-extension ms-toolsai.jupyter
    /tmp/code-server/bin/code-server --auth none --port 13337 >/tmp/code-server.log 2>&1 &
  EOT

  metadata {
    display_name = "CPU Usage"
    key          = "0_cpu_usage"
    script       = "coder stat cpu"
    interval     = 10
    timeout      = 1
  }
  metadata {
    display_name = "RAM Usage"
    key          = "1_ram_usage"
    script       = "coder stat mem"
    interval     = 10
    timeout      = 1
  }
  metadata {
    display_name = "Home Disk"
    key          = "3_home_disk"
    # $${HOME} escapes the Terraform interpolation so the shell expands it.
    script   = "coder stat disk --path $${HOME}"
    interval = 60
    timeout  = 1
  }
  metadata {
    display_name = "CPU Usage (Host)"
    key          = "4_cpu_usage_host"
    script       = "coder stat cpu --host"
    interval     = 10
    timeout      = 1
  }
  metadata {
    display_name = "Memory Usage (Host)"
    key          = "5_mem_usage_host"
    script       = "coder stat mem --host"
    interval     = 10
    timeout      = 1
  }
  metadata {
    display_name = "Load Average (Host)"
    key          = "6_load_host"
    # 1-minute load average divided by core count, two decimal places.
    script = <<-EOT
      echo "`cat /proc/loadavg | awk '{ print $1 }'` `nproc`" | awk '{ printf "%0.2f", $1/$2 }'
    EOT
    interval = 60
    timeout  = 1
  }
}
# code-server web IDE served from the agent.
# The share level is driven by the user-selected `port_share_level`
# parameter instead of being hardcoded to "owner" — otherwise the parameter
# defined above would have no effect. The template's max_port_share_level
# still acts as the ceiling on whatever the user picks.
resource "coder_app" "code-server" {
  agent_id     = coder_agent.main.id
  slug         = "code-server"
  display_name = "code-server"
  icon         = "/icon/code.svg"
  url          = "http://localhost:13337?folder=/home/coder"
  subdomain    = true
  share        = data.coder_parameter.port_share_level.value

  healthcheck {
    url       = "http://localhost:13337/healthz"
    interval  = 3
    threshold = 10
  }
}
# Shared Kubernetes labels/annotations so the Deployment and PVC can be
# traced back to the owning Coder workspace and user.
locals {
common_labels = {
"app.kubernetes.io/instance" = "coder-workspace-${data.coder_workspace.me.id}"
"app.kubernetes.io/part-of" = "coder"
"com.coder.resource" = "true"
"com.coder.workspace.id" = data.coder_workspace.me.id
"com.coder.workspace.name" = data.coder_workspace.me.name
"com.coder.user.id" = data.coder_workspace_owner.me.id
"com.coder.user.username" = data.coder_workspace_owner.me.name
}
# Labels for the workspace Deployment/Pod.
ws_labels = merge(local.common_labels, {
"app.kubernetes.io/name" = "coder-workspace"
})
# Labels for the persistent home volume claim.
pvc_labels = merge(local.common_labels, {
"app.kubernetes.io/name" = "coder-pvc"
})
common_annotations = {
"com.coder.user.email" = data.coder_workspace_owner.me.email
}
}
# Persistent home volume; survives workspace stops so /home/coder is retained.
resource "kubernetes_persistent_volume_claim" "home" {
metadata {
name = "coder-${data.coder_workspace.me.id}-home"
namespace = var.namespace
labels = local.pvc_labels
annotations = local.common_annotations
}
# Don't block apply waiting for a binding; some storage classes bind lazily.
wait_until_bound = false
spec {
access_modes = ["ReadWriteOnce"]
resources {
requests = {
storage = "${data.coder_parameter.home_disk_size.value}Gi"
}
}
}
}
# Workspace Pod, managed as a single-replica Deployment. count is tied to
# start_count so the Pod exists only while the workspace is started; the
# PVC above persists across stops.
# NOTE: the original was missing the closing brace of the `selector` block,
# leaving `strategy`/`template` nested inside it — invalid HCL. Fixed below.
resource "kubernetes_deployment" "main" {
  count            = data.coder_workspace.me.start_count
  depends_on       = [kubernetes_persistent_volume_claim.home]
  wait_for_rollout = false

  metadata {
    name        = "coder-${data.coder_workspace.me.id}"
    namespace   = var.namespace
    labels      = local.ws_labels
    annotations = local.common_annotations
  }

  spec {
    replicas = 1

    selector {
      match_labels = local.ws_labels
    }

    # Recreate (not rolling) so the PVC's ReadWriteOnce mount is released
    # before the replacement Pod starts.
    strategy {
      type = "Recreate"
    }

    template {
      metadata {
        labels = local.ws_labels
      }
      spec {
        security_context {
          run_as_user = 1000
          fs_group    = 1000
        }

        container {
          name              = "dev"
          image             = "codercom/enterprise-base:ubuntu"
          image_pull_policy = "Always"
          # Boot the Coder agent as the container entrypoint.
          command = ["sh", "-c", coder_agent.main.init_script]
          security_context {
            run_as_user = 1000
          }
          env {
            name  = "CODER_AGENT_TOKEN"
            value = coder_agent.main.token
          }
          resources {
            requests = {
              cpu    = "250m"
              memory = "512Mi"
            }
            # Limits come from the user-selected parameters above.
            limits = {
              cpu    = data.coder_parameter.cpu.value
              memory = "${data.coder_parameter.memory.value}Gi"
            }
          }
          volume_mount {
            mount_path = "/home/coder"
            name       = "home"
            read_only  = false
          }
        }

        volume {
          name = "home"
          persistent_volume_claim {
            claim_name = kubernetes_persistent_volume_claim.home.metadata[0].name
            read_only  = false
          }
        }

        # Prefer spreading workspace Pods across nodes.
        affinity {
          pod_anti_affinity {
            preferred_during_scheduling_ignored_during_execution {
              weight = 1
              pod_affinity_term {
                topology_key = "kubernetes.io/hostname"
                label_selector {
                  match_expressions {
                    key      = "app.kubernetes.io/name"
                    operator = "In"
                    values   = ["coder-workspace"]
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
Step 4: Apply the Configuration
Initialize and apply your Terraform configuration:
terraform init
terraform plan
terraform apply
Additional Information
- The template-level max_port_share_level setting acts as a ceiling — users cannot share ports beyond this level
- In Coder Enterprise, the default maximum sharing level is "owner" (most restrictive)
- In Coder OSS, the default maximum sharing level is "public" (least restrictive)
- Changes to template settings require admin privileges
For more information, refer to the Coder documentation on workspace port sharing and the coderd Terraform provider reference.