From d37f6d80f77694b2fb890635d58299346dd59c3a Mon Sep 17 00:00:00 2001
From: Muhammad Atif Ali
Date: Wed, 23 Aug 2023 12:27:57 +0300
Subject: [PATCH] chore(docs): update docs for correct use of shell and console and enforce linewidth (#9245)

---
 .prettierrc.yaml | 10 +-
 README.md | 2 +-
 SECURITY.md | 86 +++---
 docs/CONTRIBUTING.md | 139 ++++++---
 docs/about/architecture.md | 33 ++-
 docs/admin/app-logs.md | 19 +-
 docs/admin/appearance.md | 12 +-
 docs/admin/audit-logs.md | 40 ++-
 docs/admin/auth.md | 239 ++++++++++------
 docs/admin/automation.md | 37 ++-
 docs/admin/configure.md | 91 +++---
 docs/admin/git-providers.md | 65 +++--
 docs/admin/groups.md | 7 +-
 docs/admin/high-availability.md | 46 +--
 docs/admin/prometheus.md | 32 ++-
 docs/admin/provisioners.md | 104 ++++---
 docs/admin/quotas.md | 37 +--
 docs/admin/rbac.md | 10 +-
 docs/admin/scale.md | 191 ++++++++-----
 docs/admin/telemetry.md | 27 +-
 docs/admin/upgrade.md | 25 +-
 docs/admin/users.md | 83 ++++--
 docs/admin/workspace-proxies.md | 67 +++--
 docs/api/authentication.md | 2 +-
 docs/api/index.md | 4 +-
 docs/changelogs/README.md | 2 +-
 docs/changelogs/v0.25.0.md | 42 ++-
 docs/changelogs/v0.26.0.md | 28 +-
 docs/changelogs/v0.26.1.md | 16 +-
 docs/changelogs/v0.27.0.md | 60 ++--
 docs/changelogs/v0.27.1.md | 7 +-
 docs/changelogs/v0.27.3.md | 7 +-
 docs/changelogs/v2.0.0.md | 130 ++++++---
 docs/changelogs/v2.0.2.md | 52 +++-
 docs/changelogs/v2.1.0.md | 47 +++-
 docs/changelogs/v2.1.1.md | 28 +-
 docs/cli.md | 9 +-
 docs/contributing/CODE_OF_CONDUCT.md | 31 ++-
 docs/contributing/SECURITY.md | 4 +-
 docs/contributing/documentation.md | 31 ++-
 docs/contributing/feature-stages.md | 14 +-
 docs/contributing/frontend.md | 156 ++++++++---
 docs/dotfiles.md | 17 +-
 docs/enterprise.md | 6 +-
 docs/ides.md | 28 +-
 docs/ides/emacs-tramp.md | 82 ++++--
 docs/ides/gateway.md | 33 ++-
 docs/ides/remote-desktops.md | 20 +-
 docs/ides/web-ides.md | 15 +-
 docs/install/binary.md | 21 +-
 docs/install/database.md | 30 +-
 docs/install/docker.md | 48 ++--
 docs/install/install.sh.md | 13 +-
 docs/install/kubernetes.md | 81 +++---
 docs/install/offline.md | 64 +++--
 docs/install/openshift.md | 101 +++----
 docs/install/packages.md | 9 +-
 docs/install/uninstall.md | 8 +-
 docs/install/windows.md | 14 +-
 docs/networking/index.md | 53 ++--
 docs/networking/port-forwarding.md | 51 ++--
 docs/platforms/aws.md | 51 +++-
 docs/platforms/azure.md | 70 +++--
 docs/platforms/docker.md | 37 ++-
 docs/platforms/google-cloud-platform.md | 65 +++--
 docs/platforms/jfrog.md | 77 +++--
 .../kubernetes/additional-clusters.md | 64 +++--
 docs/platforms/kubernetes/deployment-logs.md | 33 ++-
 docs/platforms/kubernetes/index.md | 12 +-
 docs/platforms/other.md | 5 +-
 docs/secrets.md | 39 +--
 .../0001_user_apikeys_invalidation.md | 35 ++-
 docs/security/index.md | 13 +-
 docs/templates/README.md | 28 +-
 docs/templates/agent-metadata.md | 46 +--
 docs/templates/authentication.md | 33 ++-
 docs/templates/change-management.md | 10 +-
 docs/templates/devcontainers.md | 42 ++-
 docs/templates/docker-in-workspaces.md | 122 +++++---
 docs/templates/index.md | 263 +++++++++++++-----
 docs/templates/modules.md | 60 ++--
 docs/templates/open-in-coder.md | 26 +-
 docs/templates/parameters.md | 115 +++++---
 docs/templates/resource-metadata.md | 17 +-
 docs/templates/resource-persistence.md | 36 +--
 docs/workspaces.md | 61 ++--
 dogfood/guide.md | 50 +++-
 examples/lima/README.md | 2 +-
 examples/templates/community-templates.md | 33 ++-
 examples/web-server/apache/README.md | 26 +-
 examples/web-server/caddy/README.md | 10 +-
examples/web-server/nginx/README.md | 28 +- helm/provisioner/Chart.yaml | 4 +- .../apidocgen/markdown-template/security.def | 2 +- scripts/apidocgen/postprocess/main.go | 4 +- site/.prettierrc.yaml | 10 +- 96 files changed, 2838 insertions(+), 1457 deletions(-) diff --git a/.prettierrc.yaml b/.prettierrc.yaml index 9ba1d2ca9d..7fe31e7338 100644 --- a/.prettierrc.yaml +++ b/.prettierrc.yaml @@ -2,6 +2,7 @@ # formatting for prettier-supported files. See `.editorconfig` and # `site/.editorconfig`for whitespace formatting options. printWidth: 80 +proseWrap: always semi: false trailingComma: all useTabs: false @@ -9,10 +10,9 @@ tabWidth: 2 overrides: - files: - README.md + - docs/api/**/*.md + - docs/cli/**/*.md + - .github/**/*.{yaml,yml,toml} + - scripts/**/*.{yaml,yml,toml} options: proseWrap: preserve - - files: - - "site/**/*.yaml" - - "site/**/*.yml" - options: - proseWrap: always diff --git a/README.md b/README.md index 9443eb6b70..3f7d835125 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,7 @@ You can run the install script with `--dry-run` to see the commands that will be Once installed, you can start a production deployment1 with a single command: -```console +```shell # Automatically sets up an external access URL on *.try.coder.app coder server diff --git a/SECURITY.md b/SECURITY.md index 46986c9d3a..ee5ac8075e 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,7 +1,7 @@ # Coder Security -Coder welcomes feedback from security researchers and the general public -to help improve our security. If you believe you have discovered a vulnerability, +Coder welcomes feedback from security researchers and the general public to help +improve our security. If you believe you have discovered a vulnerability, privacy issue, exposed data, or other security issues in any of our assets, we want to hear from you. This policy outlines steps for reporting vulnerabilities to us, what we expect, what you can expect from us. @@ -10,64 +10,72 @@ You can see the pretty version [here](https://coder.com/security/policy) # Why Coder's security matters -If an attacker could fully compromise a Coder installation, they could spin -up expensive workstations, steal valuable credentials, or steal proprietary -source code. We take this risk very seriously and employ routine pen testing, -vulnerability scanning, and code reviews. We also welcome the contributions -from the community that helped make this product possible. +If an attacker could fully compromise a Coder installation, they could spin up +expensive workstations, steal valuable credentials, or steal proprietary source +code. We take this risk very seriously and employ routine pen testing, +vulnerability scanning, and code reviews. We also welcome the contributions from +the community that helped make this product possible. # Where should I report security issues? -Please report security issues to security@coder.com, providing -all relevant information. The more details you provide, the easier it will be -for us to triage and fix the issue. +Please report security issues to security@coder.com, providing all relevant +information. The more details you provide, the easier it will be for us to +triage and fix the issue. # Out of Scope -Our primary concern is around an abuse of the Coder application that allows -an attacker to gain access to another users workspace, or spin up unwanted +Our primary concern is around an abuse of the Coder application that allows an +attacker to gain access to another users workspace, or spin up unwanted workspaces. 
- DOS/DDOS attacks affecting availability --> While we do support rate limiting - of requests, we primarily leave this to the owner of the Coder installation. Our - rationale is that a DOS attack only affecting availability is not a valuable - target for attackers. + of requests, we primarily leave this to the owner of the Coder installation. + Our rationale is that a DOS attack only affecting availability is not a + valuable target for attackers. - Abuse of a compromised user credential --> If a user credential is compromised - outside of the Coder ecosystem, then we consider it beyond the scope of our application. - However, if an unprivileged user could escalate their permissions or gain access - to another workspace, that is a cause for concern. + outside of the Coder ecosystem, then we consider it beyond the scope of our + application. However, if an unprivileged user could escalate their permissions + or gain access to another workspace, that is a cause for concern. - Vulnerabilities in third party systems --> Vulnerabilities discovered in - out-of-scope systems should be reported to the appropriate vendor or applicable authority. + out-of-scope systems should be reported to the appropriate vendor or + applicable authority. # Our Commitments When working with us, according to this policy, you can expect us to: -- Respond to your report promptly, and work with you to understand and validate your report; -- Strive to keep you informed about the progress of a vulnerability as it is processed; -- Work to remediate discovered vulnerabilities in a timely manner, within our operational constraints; and -- Extend Safe Harbor for your vulnerability research that is related to this policy. +- Respond to your report promptly, and work with you to understand and validate + your report; +- Strive to keep you informed about the progress of a vulnerability as it is + processed; +- Work to remediate discovered vulnerabilities in a timely manner, within our + operational constraints; and +- Extend Safe Harbor for your vulnerability research that is related to this + policy. # Our Expectations -In participating in our vulnerability disclosure program in good faith, we ask that you: +In participating in our vulnerability disclosure program in good faith, we ask +that you: -- Play by the rules, including following this policy and any other relevant agreements. - If there is any inconsistency between this policy and any other applicable terms, the - terms of this policy will prevail; +- Play by the rules, including following this policy and any other relevant + agreements. 
If there is any inconsistency between this policy and any other + applicable terms, the terms of this policy will prevail; - Report any vulnerability you’ve discovered promptly; -- Avoid violating the privacy of others, disrupting our systems, destroying data, and/or - harming user experience; +- Avoid violating the privacy of others, disrupting our systems, destroying + data, and/or harming user experience; - Use only the Official Channels to discuss vulnerability information with us; -- Provide us a reasonable amount of time (at least 90 days from the initial report) to - resolve the issue before you disclose it publicly; -- Perform testing only on in-scope systems, and respect systems and activities which - are out-of-scope; -- If a vulnerability provides unintended access to data: Limit the amount of data you - access to the minimum required for effectively demonstrating a Proof of Concept; and - cease testing and submit a report immediately if you encounter any user data during testing, - such as Personally Identifiable Information (PII), Personal Healthcare Information (PHI), - credit card data, or proprietary information; -- You should only interact with test accounts you own or with explicit permission from +- Provide us a reasonable amount of time (at least 90 days from the initial + report) to resolve the issue before you disclose it publicly; +- Perform testing only on in-scope systems, and respect systems and activities + which are out-of-scope; +- If a vulnerability provides unintended access to data: Limit the amount of + data you access to the minimum required for effectively demonstrating a Proof + of Concept; and cease testing and submit a report immediately if you encounter + any user data during testing, such as Personally Identifiable Information + (PII), Personal Healthcare Information (PHI), credit card data, or proprietary + information; +- You should only interact with test accounts you own or with explicit + permission from - the account holder; and - Do not engage in extortion. diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 291f4e1444..710152a9f3 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -2,7 +2,11 @@ ## Requirements -We recommend using the [Nix](https://nix.dev/) package manager as it makes any pain related to maintaining dependency versions [just disappear](https://twitter.com/mitchellh/status/1491102567296040961). Once nix [has been installed](https://nixos.org/download.html) the development environment can be _manually instantiated_ through the `nix-shell` command: +We recommend using the [Nix](https://nix.dev/) package manager as it makes any +pain related to maintaining dependency versions +[just disappear](https://twitter.com/mitchellh/status/1491102567296040961). Once +nix [has been installed](https://nixos.org/download.html) the development +environment can be _manually instantiated_ through the `nix-shell` command: ```shell cd ~/code/coder @@ -17,7 +21,10 @@ copying path '/nix/store/v2gvj8whv241nj4lzha3flq8pnllcmvv-ignore-5.2.0.tgz' from ... ``` -If [direnv](https://direnv.net/) is installed and the [hooks are configured](https://direnv.net/docs/hook.html) then the development environment can be _automatically instantiated_ by creating the following `.envrc`, thus removing the need to run `nix-shell` by hand! 
+If [direnv](https://direnv.net/) is installed and the +[hooks are configured](https://direnv.net/docs/hook.html) then the development +environment can be _automatically instantiated_ by creating the following +`.envrc`, thus removing the need to run `nix-shell` by hand! ```shell cd ~/code/coder @@ -25,7 +32,9 @@ echo "use nix" >.envrc direnv allow ``` -Now, whenever you enter the project folder, [`direnv`](https://direnv.net/docs/hook.html) will prepare the environment for you: +Now, whenever you enter the project folder, +[`direnv`](https://direnv.net/docs/hook.html) will prepare the environment for +you: ```shell cd ~/code/coder @@ -37,7 +46,8 @@ direnv: export +AR +AS +CC +CONFIG_SHELL +CXX +HOST_PATH +IN_NIX_SHELL +LD +NIX_ 🎉 ``` -Alternatively if you do not want to use nix then you'll need to install the need the following tools by hand: +Alternatively if you do not want to use nix then you'll need to install the need +the following tools by hand: - Go 1.18+ - on macOS, run `brew install go` @@ -76,35 +86,46 @@ Use the following `make` commands and scripts in development: - Run `./scripts/develop.sh` - Access `http://localhost:8080` -- The default user is `admin@coder.com` and the default password is `SomeSecurePassword!` +- The default user is `admin@coder.com` and the default password is + `SomeSecurePassword!` ### Deploying a PR -You can test your changes by creating a PR deployment. There are two ways to do this: +You can test your changes by creating a PR deployment. There are two ways to do +this: 1. By running `./scripts/deploy-pr.sh` -2. By manually triggering the [`pr-deploy.yaml`](https://github.com/coder/coder/actions/workflows/pr-deploy.yaml) GitHub Action workflow - ![Deploy PR manually](./images/deploy-pr-manually.png) +2. By manually triggering the + [`pr-deploy.yaml`](https://github.com/coder/coder/actions/workflows/pr-deploy.yaml) + GitHub Action workflow ![Deploy PR manually](./images/deploy-pr-manually.png) #### Available options - `-d` or `--deploy`, force deploys the PR by deleting the existing deployment. -- `-b` or `--build`, force builds the Docker image. (generally not needed as we are intelligently checking if the image needs to be built) -- `-e EXPERIMENT1,EXPERIMENT2` or `--experiments EXPERIMENT1,EXPERIMENT2`, will enable the specified experiments. (defaults to `*`) -- `-n` or `--dry-run` will display the context without deployment. e.g., branch name and PR number, etc. +- `-b` or `--build`, force builds the Docker image. (generally not needed as we + are intelligently checking if the image needs to be built) +- `-e EXPERIMENT1,EXPERIMENT2` or `--experiments EXPERIMENT1,EXPERIMENT2`, will + enable the specified experiments. (defaults to `*`) +- `-n` or `--dry-run` will display the context without deployment. e.g., branch + name and PR number, etc. - `-y` or `--yes`, will skip the CLI confirmation prompt. -> Note: PR deployment will be re-deployed automatically when the PR is updated. It will use the last values automatically for redeployment. +> Note: PR deployment will be re-deployed automatically when the PR is updated. +> It will use the last values automatically for redeployment. -> You need to be a member or collaborator of the of [coder](github.com/coder) GitHub organization to be able to deploy a PR. +> You need to be a member or collaborator of the of [coder](github.com/coder) +> GitHub organization to be able to deploy a PR. 
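For reference, here is a sketch of how the options listed above might be combined, assuming they compose like ordinary CLI flags; the experiment names are placeholders:

```shell
# Preview the deployment context (branch name, PR number, etc.) without deploying.
./scripts/deploy-pr.sh --dry-run

# Force a redeploy with two specific experiments enabled, skipping the
# confirmation prompt.
./scripts/deploy-pr.sh --deploy --experiments EXPERIMENT1,EXPERIMENT2 --yes
```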
-Once the deployment is finished, a unique link and credentials will be posted in the [#pr-deployments](https://codercom.slack.com/archives/C05DNE982E8) Slack channel. +Once the deployment is finished, a unique link and credentials will be posted in +the [#pr-deployments](https://codercom.slack.com/archives/C05DNE982E8) Slack +channel. ### Adding database migrations and fixtures #### Database migrations -Database migrations are managed with [`migrate`](https://github.com/golang-migrate/migrate). +Database migrations are managed with +[`migrate`](https://github.com/golang-migrate/migrate). To add new migrations, use the following command: @@ -125,11 +146,15 @@ much data as possible. There are two types of fixtures that are used to test that migrations don't break existing Coder deployments: -- Partial fixtures [`migrations/testdata/fixtures`](../coderd/database/migrations/testdata/fixtures) -- Full database dumps [`migrations/testdata/full_dumps`](../coderd/database/migrations/testdata/full_dumps) +- Partial fixtures + [`migrations/testdata/fixtures`](../coderd/database/migrations/testdata/fixtures) +- Full database dumps + [`migrations/testdata/full_dumps`](../coderd/database/migrations/testdata/full_dumps) -Both types behave like database migrations (they also [`migrate`](https://github.com/golang-migrate/migrate)). Their behavior mirrors Coder migrations such that when migration -number `000022` is applied, fixture `000022` is applied afterwards. +Both types behave like database migrations (they also +[`migrate`](https://github.com/golang-migrate/migrate)). Their behavior mirrors +Coder migrations such that when migration number `000022` is applied, fixture +`000022` is applied afterwards. Partial fixtures are used to conveniently add data to newly created tables so that we can ensure that this data is migrated without issue. @@ -175,19 +200,20 @@ This helps in naming the dump (e.g. `000069` above). ### Documentation -Our style guide for authoring documentation can be found [here](./contributing/documentation.md). +Our style guide for authoring documentation can be found +[here](./contributing/documentation.md). ### Backend #### Use Go style -Contributions must adhere to the guidelines outlined in [Effective -Go](https://go.dev/doc/effective_go). We prefer linting rules over documenting -styles (run ours with `make lint`); humans are error-prone! +Contributions must adhere to the guidelines outlined in +[Effective Go](https://go.dev/doc/effective_go). We prefer linting rules over +documenting styles (run ours with `make lint`); humans are error-prone! -Read [Go's Code Review Comments -Wiki](https://github.com/golang/go/wiki/CodeReviewComments) for information on -common comments made during reviews of Go code. +Read +[Go's Code Review Comments Wiki](https://github.com/golang/go/wiki/CodeReviewComments) +for information on common comments made during reviews of Go code. #### Avoid unused packages @@ -202,8 +228,8 @@ Our frontend guide can be found [here](./contributing/frontend.md). ## Reviews -> The following information has been borrowed from [Go's review -> philosophy](https://go.dev/doc/contribute#reviews). +> The following information has been borrowed from +> [Go's review philosophy](https://go.dev/doc/contribute#reviews). Coder values thorough reviews. For each review comment that you receive, please "close" it by implementing the suggestion or providing an explanation on why the @@ -220,27 +246,45 @@ be applied selectively or to discourage anyone from contributing. 
## Releases -Coder releases are initiated via [`./scripts/release.sh`](../scripts/release.sh) and automated via GitHub Actions. Specifically, the [`release.yaml`](../.github/workflows/release.yaml) workflow. They are created based on the current [`main`](https://github.com/coder/coder/tree/main) branch. +Coder releases are initiated via [`./scripts/release.sh`](../scripts/release.sh) +and automated via GitHub Actions. Specifically, the +[`release.yaml`](../.github/workflows/release.yaml) workflow. They are created +based on the current [`main`](https://github.com/coder/coder/tree/main) branch. -The release notes for a release are automatically generated from commit titles and metadata from PRs that are merged into `main`. +The release notes for a release are automatically generated from commit titles +and metadata from PRs that are merged into `main`. ### Creating a release -The creation of a release is initiated via [`./scripts/release.sh`](../scripts/release.sh). This script will show a preview of the release that will be created, and if you choose to continue, create and push the tag which will trigger the creation of the release via GitHub Actions. +The creation of a release is initiated via +[`./scripts/release.sh`](../scripts/release.sh). This script will show a preview +of the release that will be created, and if you choose to continue, create and +push the tag which will trigger the creation of the release via GitHub Actions. See `./scripts/release.sh --help` for more information. ### Creating a release (via workflow dispatch) -Typically the workflow dispatch is only used to test (dry-run) a release, meaning no actual release will take place. The workflow can be dispatched manually from [Actions: Release](https://github.com/coder/coder/actions/workflows/release.yaml). Simply press "Run workflow" and choose dry-run. +Typically the workflow dispatch is only used to test (dry-run) a release, +meaning no actual release will take place. The workflow can be dispatched +manually from +[Actions: Release](https://github.com/coder/coder/actions/workflows/release.yaml). +Simply press "Run workflow" and choose dry-run. -If a release has failed after the tag has been created and pushed, it can be retried by again, pressing "Run workflow", changing "Use workflow from" from "Branch: main" to "Tag: vX.X.X" and not selecting dry-run. +If a release has failed after the tag has been created and pushed, it can be +retried by again, pressing "Run workflow", changing "Use workflow from" from +"Branch: main" to "Tag: vX.X.X" and not selecting dry-run. ### Commit messages -Commit messages should follow the [Conventional Commits 1.0.0](https://www.conventionalcommits.org/en/v1.0.0/) specification. +Commit messages should follow the +[Conventional Commits 1.0.0](https://www.conventionalcommits.org/en/v1.0.0/) +specification. -Allowed commit types (`feat`, `fix`, etc.) are listed in [conventional-commit-types](https://github.com/commitizen/conventional-commit-types/blob/c3a9be4c73e47f2e8197de775f41d981701407fb/index.json). Note that these types are also used to automatically sort and organize the release notes. +Allowed commit types (`feat`, `fix`, etc.) are listed in +[conventional-commit-types](https://github.com/commitizen/conventional-commit-types/blob/c3a9be4c73e47f2e8197de775f41d981701407fb/index.json). +Note that these types are also used to automatically sort and organize the +release notes. A good commit message title uses the imperative, present tense and is ~50 characters long (no more than 72). 
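For instance, a conforming title plus a short imperative body might be committed like this; the change described is purely hypothetical and only illustrates the format:

```shell
# Hypothetical commit -- type(scope) prefix, imperative present-tense title
# under 72 characters, with an optional explanatory body.
git commit \
  -m "feat(cli): add --output json to list commands" \
  -m "Allow scripts to consume machine-readable output instead of parsing tables."
```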
@@ -250,21 +294,34 @@ Examples: - Good: `feat(api): add feature X` - Bad: `feat(api): added feature X` (past tense) -A good rule of thumb for writing good commit messages is to recite: [If applied, this commit will ...](https://reflectoring.io/meaningful-commit-messages/). +A good rule of thumb for writing good commit messages is to recite: +[If applied, this commit will ...](https://reflectoring.io/meaningful-commit-messages/). -**Note:** We lint PR titles to ensure they follow the Conventional Commits specification, however, it's still possible to merge PRs on GitHub with a badly formatted title. Take care when merging single-commit PRs as GitHub may prefer to use the original commit title instead of the PR title. +**Note:** We lint PR titles to ensure they follow the Conventional Commits +specification, however, it's still possible to merge PRs on GitHub with a badly +formatted title. Take care when merging single-commit PRs as GitHub may prefer +to use the original commit title instead of the PR title. ### Breaking changes Breaking changes can be triggered in two ways: -- Add `!` to the commit message title, e.g. `feat(api)!: remove deprecated endpoint /test` -- Add the [`release/breaking`](https://github.com/coder/coder/issues?q=sort%3Aupdated-desc+label%3Arelease%2Fbreaking) label to a PR that has, or will be, merged into `main`. +- Add `!` to the commit message title, e.g. + `feat(api)!: remove deprecated endpoint /test` +- Add the + [`release/breaking`](https://github.com/coder/coder/issues?q=sort%3Aupdated-desc+label%3Arelease%2Fbreaking) + label to a PR that has, or will be, merged into `main`. ### Security -The [`security`](https://github.com/coder/coder/issues?q=sort%3Aupdated-desc+label%3Asecurity) label can be added to PRs that have, or will be, merged into `main`. Doing so will make sure the change stands out in the release notes. +The +[`security`](https://github.com/coder/coder/issues?q=sort%3Aupdated-desc+label%3Asecurity) +label can be added to PRs that have, or will be, merged into `main`. Doing so +will make sure the change stands out in the release notes. ### Experimental -The [`release/experimental`](https://github.com/coder/coder/issues?q=sort%3Aupdated-desc+label%3Arelease%2Fexperimental) label can be used to move the note to the bottom of the release notes under a separate title. +The +[`release/experimental`](https://github.com/coder/coder/issues?q=sort%3Aupdated-desc+label%3Arelease%2Fexperimental) +label can be used to move the note to the bottom of the release notes under a +separate title. diff --git a/docs/about/architecture.md b/docs/about/architecture.md index 45ef36b99b..9489ee7fc8 100644 --- a/docs/about/architecture.md +++ b/docs/about/architecture.md @@ -8,9 +8,9 @@ This document provides a high level overview of Coder's architecture. ## coderd -coderd is the service created by running `coder server`. It is a thin -API that connects workspaces, provisioners and users. coderd stores its state in -Postgres and is the only service that communicates with Postgres. +coderd is the service created by running `coder server`. It is a thin API that +connects workspaces, provisioners and users. coderd stores its state in Postgres +and is the only service that communicates with Postgres. It offers: @@ -22,16 +22,18 @@ It offers: ## provisionerd -provisionerd is the execution context for infrastructure modifying providers. -At the moment, the only provider is Terraform (running `terraform`). 
+provisionerd is the execution context for infrastructure modifying providers. At +the moment, the only provider is Terraform (running `terraform`). -By default, the Coder server runs multiple provisioner daemons. [External provisioners](../admin/provisioners.md) can be added for security or scalability purposes. +By default, the Coder server runs multiple provisioner daemons. +[External provisioners](../admin/provisioners.md) can be added for security or +scalability purposes. ## Agents -An agent is the Coder service that runs within a user's remote workspace. -It provides a consistent interface for coderd and clients to communicate -with workspaces regardless of operating system, architecture, or cloud. +An agent is the Coder service that runs within a user's remote workspace. It +provides a consistent interface for coderd and clients to communicate with +workspaces regardless of operating system, architecture, or cloud. It offers the following services along with much more: @@ -40,15 +42,20 @@ It offers the following services along with much more: - Liveness checks - `startup_script` automation -Templates are responsible for [creating and running agents](../templates/index.md#coder-agent) within workspaces. +Templates are responsible for +[creating and running agents](../templates/index.md#coder-agent) within +workspaces. ## Service Bundling -While coderd and Postgres can be orchestrated independently,our default installation -paths bundle them all together into one system service. It's perfectly fine to run a production deployment this way, but there are certain situations that necessitate decomposition: +While coderd and Postgres can be orchestrated independently,our default +installation paths bundle them all together into one system service. It's +perfectly fine to run a production deployment this way, but there are certain +situations that necessitate decomposition: - Reducing global client latency (distribute coderd and centralize database) -- Achieving greater availability and efficiency (horizontally scale individual services) +- Achieving greater availability and efficiency (horizontally scale individual + services) ## Workspaces diff --git a/docs/admin/app-logs.md b/docs/admin/app-logs.md index 87efe05ae6..8235fda06e 100644 --- a/docs/admin/app-logs.md +++ b/docs/admin/app-logs.md @@ -1,21 +1,28 @@ # Application Logs -In Coderd, application logs refer to the records of events, messages, and activities generated by the application during its execution. -These logs provide valuable information about the application's behavior, performance, and any issues that may have occurred. +In Coderd, application logs refer to the records of events, messages, and +activities generated by the application during its execution. These logs provide +valuable information about the application's behavior, performance, and any +issues that may have occurred. -Application logs include entries that capture events on different levels of severity: +Application logs include entries that capture events on different levels of +severity: - Informational messages - Warnings - Errors - Debugging information -By analyzing application logs, system administrators can gain insights into the application's behavior, identify and diagnose problems, track performance metrics, and make informed decisions to improve the application's stability and efficiency. 
+By analyzing application logs, system administrators can gain insights into the +application's behavior, identify and diagnose problems, track performance +metrics, and make informed decisions to improve the application's stability and +efficiency. ## Error logs -To ensure effective monitoring and timely response to critical events in the Coder application, it is recommended to configure log alerts -that specifically watch for the following log entries: +To ensure effective monitoring and timely response to critical events in the +Coder application, it is recommended to configure log alerts that specifically +watch for the following log entries: | Log Level | Module | Log message | Potential issues | | --------- | ---------------------------- | ----------------------- | ------------------------------------------------------------------------------------------------- | diff --git a/docs/admin/appearance.md b/docs/admin/appearance.md index 5d061b3bb1..f80ffc8c1b 100644 --- a/docs/admin/appearance.md +++ b/docs/admin/appearance.md @@ -2,12 +2,15 @@ ## Support Links -Support links let admins adjust the user dropdown menu to include links referring to internal company resources. The menu section replaces the original menu positions: documentation, report a bug to GitHub, or join the Discord server. +Support links let admins adjust the user dropdown menu to include links +referring to internal company resources. The menu section replaces the original +menu positions: documentation, report a bug to GitHub, or join the Discord +server. ![support links](../images/admin/support-links.png) -Custom links can be set in the deployment configuration using the `-c ` -flag to `coder server`. +Custom links can be set in the deployment configuration using the +`-c ` flag to `coder server`. ```yaml supportLinks: @@ -27,7 +30,8 @@ The link icons are optional, and limited to: `bug`, `chat`, and `docs`. ## Service Banners (enterprise) -Service Banners let admins post important messages to all site users. Only Site Owners may set the service banner. +Service Banners let admins post important messages to all site users. Only Site +Owners may set the service banner. ![service banners](../images/admin/service-banners.png) diff --git a/docs/admin/audit-logs.md b/docs/admin/audit-logs.md index 143ff59344..3ad9395e35 100644 --- a/docs/admin/audit-logs.md +++ b/docs/admin/audit-logs.md @@ -1,7 +1,6 @@ # Audit Logs -Audit Logs allows **Auditors** to monitor user operations in -their deployment. +Audit Logs allows **Auditors** to monitor user operations in their deployment. ## Tracked Events @@ -27,34 +26,48 @@ We track the following resources: ## Filtering logs -In the Coder UI you can filter your audit logs using the pre-defined filter or by using the Coder's filter query like the examples below: +In the Coder UI you can filter your audit logs using the pre-defined filter or +by using the Coder's filter query like the examples below: - `resource_type:workspace action:delete` to find deleted workspaces - `resource_type:template action:create` to find created templates The supported filters are: -- `resource_type` - The type of the resource. It can be a workspace, template, user, etc. You can [find here](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#ResourceType) all the resource types that are supported. +- `resource_type` - The type of the resource. It can be a workspace, template, + user, etc. 
You can + [find here](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#ResourceType) + all the resource types that are supported. - `resource_id` - The ID of the resource. -- `resource_target` - The name of the resource. Can be used instead of `resource_id`. -- `action`- The action applied to a resource. You can [find here](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#AuditAction) all the actions that are supported. -- `username` - The username of the user who triggered the action. You can also use `me` as a convenient alias for the logged-in user. +- `resource_target` - The name of the resource. Can be used instead of + `resource_id`. +- `action`- The action applied to a resource. You can + [find here](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#AuditAction) + all the actions that are supported. +- `username` - The username of the user who triggered the action. You can also + use `me` as a convenient alias for the logged-in user. - `email` - The email of the user who triggered the action. - `date_from` - The inclusive start date with format `YYYY-MM-DD`. - `date_to` - The inclusive end date with format `YYYY-MM-DD`. -- `build_reason` - To be used with `resource_type:workspace_build`, the [initiator](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#BuildReason) behind the build start or stop. +- `build_reason` - To be used with `resource_type:workspace_build`, the + [initiator](https://pkg.go.dev/github.com/coder/coder/v2/codersdk#BuildReason) + behind the build start or stop. ## Capturing/Exporting Audit Logs -In addition to the user interface, there are multiple ways to consume or query audit trails. +In addition to the user interface, there are multiple ways to consume or query +audit trails. ## REST API -Audit logs can be accessed through our REST API. You can find detailed information about this in our [endpoint documentation](../api/audit.md#get-audit-logs). +Audit logs can be accessed through our REST API. You can find detailed +information about this in our +[endpoint documentation](../api/audit.md#get-audit-logs). ## Service Logs -Audit trails are also dispatched as service logs and can be captured and categorized using any log management tool such as [Splunk](https://splunk.com). +Audit trails are also dispatched as service logs and can be captured and +categorized using any log management tool such as [Splunk](https://splunk.com). Example of a [JSON formatted](../cli/server.md#--log-json) audit log entry: @@ -93,10 +106,11 @@ Example of a [JSON formatted](../cli/server.md#--log-json) audit log entry: Example of a [human readable](../cli/server.md#--log-human) audit log entry: -```sh +```console 2023-06-13 03:43:29.233 [info] coderd: audit_log ID=95f7c392-da3e-480c-a579-8909f145fbe2 Time="2023-06-13T03:43:29.230422Z" UserID=6c405053-27e3-484a-9ad7-bcb64e7bfde6 OrganizationID=00000000-0000-0000-0000-000000000000 Ip= UserAgent= ResourceType=workspace_build ResourceID=988ae133-5b73-41e3-a55e-e1e9d3ef0b66 ResourceTarget="" Action=start Diff="{}" StatusCode=200 AdditionalFields="{\"workspace_name\":\"linux-container\",\"build_number\":\"7\",\"build_reason\":\"initiator\",\"workspace_owner\":\"\"}" RequestID=9682b1b5-7b9f-4bf2-9a39-9463f8e41cd6 ResourceIcon="" ``` ## Enabling this feature -This feature is only available with an enterprise license. [Learn more](../enterprise.md) +This feature is only available with an enterprise license. 
+[Learn more](../enterprise.md) diff --git a/docs/admin/auth.md b/docs/admin/auth.md index 4a512bfc36..fb278cf09b 100644 --- a/docs/admin/auth.md +++ b/docs/admin/auth.md @@ -14,12 +14,19 @@ The following steps explain how to set up GitHub OAuth or OpenID Connect. ### Step 1: Configure the OAuth application in GitHub -First, [register a GitHub OAuth app](https://developer.github.com/apps/building-oauth-apps/creating-an-oauth-app/). GitHub will ask you for the following Coder parameters: +First, +[register a GitHub OAuth app](https://developer.github.com/apps/building-oauth-apps/creating-an-oauth-app/). +GitHub will ask you for the following Coder parameters: -- **Homepage URL**: Set to your Coder deployments [`CODER_ACCESS_URL`](https://coder.com/docs/v2/latest/cli/server#--access-url) (e.g. `https://coder.domain.com`) +- **Homepage URL**: Set to your Coder deployments + [`CODER_ACCESS_URL`](../cli/server.md#--access-url) (e.g. + `https://coder.domain.com`) - **User Authorization Callback URL**: Set to `https://coder.domain.com` -> Note: If you want to allow multiple coder deployments hosted on subdomains e.g. coder1.domain.com, coder2.domain.com, to be able to authenticate with the same GitHub OAuth app, then you can set **User Authorization Callback URL** to the `https://domain.com` +> Note: If you want to allow multiple coder deployments hosted on subdomains +> e.g. coder1.domain.com, coder2.domain.com, to be able to authenticate with the +> same GitHub OAuth app, then you can set **User Authorization Callback URL** to +> the `https://domain.com` Note the Client ID and Client Secret generated by GitHub. You will use these values in the next step. @@ -29,17 +36,18 @@ values in the next step. Navigate to your Coder host and run the following command to start up the Coder server: -```console +```shell coder server --oauth2-github-allow-signups=true --oauth2-github-allowed-orgs="your-org" --oauth2-github-client-id="8d1...e05" --oauth2-github-client-secret="57ebc9...02c24c" ``` -> For GitHub Enterprise support, specify the `--oauth2-github-enterprise-base-url` flag. +> For GitHub Enterprise support, specify the +> `--oauth2-github-enterprise-base-url` flag. Alternatively, if you are running Coder as a system service, you can achieve the same result as the command above by adding the following environment variables to the `/etc/coder.d/coder.env` file: -```console +```env CODER_OAUTH2_GITHUB_ALLOW_SIGNUPS=true CODER_OAUTH2_GITHUB_ALLOWED_ORGS="your-org" CODER_OAUTH2_GITHUB_CLIENT_ID="8d1...e05" @@ -48,7 +56,7 @@ CODER_OAUTH2_GITHUB_CLIENT_SECRET="57ebc9...02c24c" **Note:** To allow everyone to signup using GitHub, set: -```console +```env CODER_OAUTH2_GITHUB_ALLOW_EVERYONE=true ``` @@ -76,7 +84,7 @@ coder: To upgrade Coder, run: -```console +```shell helm upgrade coder-v2/coder -n -f values.yaml ``` @@ -86,7 +94,8 @@ helm upgrade coder-v2/coder -n -f values.yaml ## OpenID Connect -The following steps through how to integrate any OpenID Connect provider (Okta, Active Directory, etc.) to Coder. +The following steps through how to integrate any OpenID Connect provider (Okta, +Active Directory, etc.) to Coder. 
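Before starting, it can help to confirm the issuer URL you will supply in Step 2 by querying your provider's standard OIDC discovery document. This is a generic sketch: `https://issuer.corp.com` is the same placeholder issuer used below, and `jq` is assumed to be available.

```shell
# Print the issuer advertised by the provider's discovery document.
# The value should match CODER_OIDC_ISSUER_URL exactly.
curl -fsSL https://issuer.corp.com/.well-known/openid-configuration | jq .issuer
```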
### Step 1: Set Redirect URI with your OIDC provider @@ -99,15 +108,15 @@ Your OIDC provider will ask you for the following parameter: Navigate to your Coder host and run the following command to start up the Coder server: -```console +```shell coder server --oidc-issuer-url="https://issuer.corp.com" --oidc-email-domain="your-domain-1,your-domain-2" --oidc-client-id="533...des" --oidc-client-secret="G0CSP...7qSM" ``` -If you are running Coder as a system service, you can achieve the -same result as the command above by adding the following environment variables -to the `/etc/coder.d/coder.env` file: +If you are running Coder as a system service, you can achieve the same result as +the command above by adding the following environment variables to the +`/etc/coder.d/coder.env` file: -```console +```env CODER_OIDC_ISSUER_URL="https://issuer.corp.com" CODER_OIDC_EMAIL_DOMAIN="your-domain-1,your-domain-2" CODER_OIDC_CLIENT_ID="533...des" @@ -134,46 +143,46 @@ coder: To upgrade Coder, run: -```console +```shell helm upgrade coder-v2/coder -n -f values.yaml ``` ## OIDC Claims -When a user logs in for the first time via OIDC, Coder will merge both -the claims from the ID token and the claims obtained from hitting the -upstream provider's `userinfo` endpoint, and use the resulting data -as a basis for creating a new user or looking up an existing user. +When a user logs in for the first time via OIDC, Coder will merge both the +claims from the ID token and the claims obtained from hitting the upstream +provider's `userinfo` endpoint, and use the resulting data as a basis for +creating a new user or looking up an existing user. -To troubleshoot claims, set `CODER_VERBOSE=true` and follow the logs -while signing in via OIDC as a new user. Coder will log the claim fields -returned by the upstream identity provider in a message containing the -string `got oidc claims`, as well as the user info returned. +To troubleshoot claims, set `CODER_VERBOSE=true` and follow the logs while +signing in via OIDC as a new user. Coder will log the claim fields returned by +the upstream identity provider in a message containing the string +`got oidc claims`, as well as the user info returned. -> **Note:** If you need to ensure that Coder only uses information from -> the ID token and does not hit the UserInfo endpoint, you can set the -> configuration option `CODER_OIDC_IGNORE_USERINFO=true`. +> **Note:** If you need to ensure that Coder only uses information from the ID +> token and does not hit the UserInfo endpoint, you can set the configuration +> option `CODER_OIDC_IGNORE_USERINFO=true`. ### Email Addresses -By default, Coder will look for the OIDC claim named `email` and use that -value for the newly created user's email address. +By default, Coder will look for the OIDC claim named `email` and use that value +for the newly created user's email address. If your upstream identity provider users a different claim, you can set `CODER_OIDC_EMAIL_FIELD` to the desired claim. -> **Note:** If this field is not present, Coder will attempt to use the -> claim field configured for `username` as an email address. If this field -> is not a valid email address, OIDC logins will fail. +> **Note** If this field is not present, Coder will attempt to use the claim +> field configured for `username` as an email address. If this field is not a +> valid email address, OIDC logins will fail. ### Email Address Verification -Coder requires all OIDC email addresses to be verified by default. 
If -the `email_verified` claim is present in the token response from the identity +Coder requires all OIDC email addresses to be verified by default. If the +`email_verified` claim is present in the token response from the identity provider, Coder will validate that its value is `true`. If needed, you can disable this behavior with the following setting: -```console +```env CODER_OIDC_IGNORE_EMAIL_VERIFIED=true ``` @@ -182,14 +191,14 @@ CODER_OIDC_IGNORE_EMAIL_VERIFIED=true ### Usernames -When a new user logs in via OIDC, Coder will by default use the value -of the claim field named `preferred_username` as the the username. +When a new user logs in via OIDC, Coder will by default use the value of the +claim field named `preferred_username` as the the username. -If your upstream identity provider uses a different claim, you can -set `CODER_OIDC_USERNAME_FIELD` to the desired claim. +If your upstream identity provider uses a different claim, you can set +`CODER_OIDC_USERNAME_FIELD` to the desired claim. -> **Note:** If this claim is empty, the email address will be stripped of -> the domain, and become the username (e.g. `example@coder.com` becomes `example`). +> **Note:** If this claim is empty, the email address will be stripped of the +> domain, and become the username (e.g. `example@coder.com` becomes `example`). > To avoid conflicts, Coder may also append a random word to the resulting > username. @@ -198,36 +207,38 @@ set `CODER_OIDC_USERNAME_FIELD` to the desired claim. If you'd like to change the OpenID Connect button text and/or icon, you can configure them like so: -```console +```env CODER_OIDC_SIGN_IN_TEXT="Sign in with Gitea" CODER_OIDC_ICON_URL=https://gitea.io/images/gitea.png ``` ## Disable Built-in Authentication -To remove email and password login, set the following environment variable on your -Coder deployment: +To remove email and password login, set the following environment variable on +your Coder deployment: -```console +```env CODER_DISABLE_PASSWORD_AUTH=true ``` ## SCIM (enterprise) Coder supports user provisioning and deprovisioning via SCIM 2.0 with header -authentication. Upon deactivation, users are [suspended](./users.md#suspend-a-user) -and are not deleted. [Configure](./configure.md) your SCIM application with an -auth key and supply it the Coder server. +authentication. Upon deactivation, users are +[suspended](./users.md#suspend-a-user) and are not deleted. +[Configure](./configure.md) your SCIM application with an auth key and supply it +the Coder server. -```console +```env CODER_SCIM_API_KEY="your-api-key" ``` ## TLS -If your OpenID Connect provider requires client TLS certificates for authentication, you can configure them like so: +If your OpenID Connect provider requires client TLS certificates for +authentication, you can configure them like so: -```console +```env CODER_TLS_CLIENT_CERT_FILE=/path/to/cert.pem CODER_TLS_CLIENT_KEY_FILE=/path/to/key.pem ``` @@ -237,22 +248,31 @@ CODER_TLS_CLIENT_KEY_FILE=/path/to/key.pem If your OpenID Connect provider supports group claims, you can configure Coder to synchronize groups in your auth provider to groups within Coder. -To enable group sync, ensure that the `groups` claim is set by adding the correct scope to request. If group sync is -enabled, the user's groups will be controlled by the OIDC provider. This means -manual group additions/removals will be overwritten on the next login. +To enable group sync, ensure that the `groups` claim is set by adding the +correct scope to request. 
If group sync is enabled, the user's groups will be +controlled by the OIDC provider. This means manual group additions/removals will +be overwritten on the next login. -```console +```env # as an environment variable CODER_OIDC_SCOPES=openid,profile,email,groups +``` + +```shell # as a flag --oidc-scopes openid,profile,email,groups ``` -With the `groups` scope requested, we also need to map the `groups` claim name. Coder recommends using `groups` for the claim name. This step is necessary if your **scope's name** is something other than `groups`. +With the `groups` scope requested, we also need to map the `groups` claim name. +Coder recommends using `groups` for the claim name. This step is necessary if +your **scope's name** is something other than `groups`. -```console +```env # as an environment variable CODER_OIDC_GROUP_FIELD=groups +``` + +```shell # as a flag --oidc-group-field groups ``` @@ -264,9 +284,12 @@ For cases when an OIDC provider only returns group IDs ([Azure AD][azure-gids]) or you want to have different group names in Coder than in your OIDC provider, you can configure mapping between the two. -```console +```env # as an environment variable CODER_OIDC_GROUP_MAPPING='{"myOIDCGroupID": "myCoderGroupName"}' +``` + +```shell # as a flag --oidc-group-mapping '{"myOIDCGroupID": "myCoderGroupName"}' ``` @@ -286,7 +309,8 @@ OIDC provider will be added to the `myCoderGroupName` group in Coder. > **Note:** Groups are only updated on login. -[azure-gids]: https://github.com/MicrosoftDocs/azure-docs/issues/59766#issuecomment-664387195 +[azure-gids]: + https://github.com/MicrosoftDocs/azure-docs/issues/59766#issuecomment-664387195 ### Troubleshooting @@ -294,22 +318,34 @@ Some common issues when enabling group sync. #### User not being assigned / Group does not exist -If you want Coder to create groups that do not exist, you can set the following environment variable. If you enable this, your OIDC provider might be sending over many unnecessary groups. Use filtering options on the OIDC provider to limit the groups sent over to prevent creating excess groups. +If you want Coder to create groups that do not exist, you can set the following +environment variable. If you enable this, your OIDC provider might be sending +over many unnecessary groups. Use filtering options on the OIDC provider to +limit the groups sent over to prevent creating excess groups. -```console +```env # as an environment variable CODER_OIDC_GROUP_AUTO_CREATE=true +``` +```shell # as a flag --oidc-group-auto-create=true ``` -A basic regex filtering option on the Coder side is available. This is applied **after** the group mapping (`CODER_OIDC_GROUP_MAPPING`), meaning if the group is remapped, the remapped value is tested in the regex. This is useful if you want to filter out groups that do not match a certain pattern. For example, if you want to only allow groups that start with `my-group-` to be created, you can set the following environment variable. +A basic regex filtering option on the Coder side is available. This is applied +**after** the group mapping (`CODER_OIDC_GROUP_MAPPING`), meaning if the group +is remapped, the remapped value is tested in the regex. This is useful if you +want to filter out groups that do not match a certain pattern. For example, if +you want to only allow groups that start with `my-group-` to be created, you can +set the following environment variable. 
-```console +```env # as an environment variable CODER_OIDC_GROUP_REGEX_FILTER="^my-group-.*$" +``` +```shell # as a flag --oidc-group-regex-filter="^my-group-.*$" ``` @@ -322,28 +358,39 @@ If you see an error like the following, you may have an invalid scope. The application '' asked for scope 'groups' that doesn't exist on the resource... ``` -This can happen because the identity provider has a different name for the scope. For example, Azure AD uses `GroupMember.Read.All` instead of `groups`. You can find the correct scope name in the IDP's documentation. Some IDP's allow configuring the name of this scope. +This can happen because the identity provider has a different name for the +scope. For example, Azure AD uses `GroupMember.Read.All` instead of `groups`. +You can find the correct scope name in the IDP's documentation. Some IDP's allow +configuring the name of this scope. -The solution is to update the value of `CODER_OIDC_SCOPES` to the correct value for the identity provider. +The solution is to update the value of `CODER_OIDC_SCOPES` to the correct value +for the identity provider. #### No `group` claim in the `got oidc claims` log Steps to troubleshoot. -1. Ensure the user is a part of a group in the IDP. If the user has 0 groups, no `groups` claim will be sent. -2. Check if another claim appears to be the correct claim with a different name. A common name is `memberOf` instead of `groups`. If this is present, update `CODER_OIDC_GROUP_FIELD=memberOf`. -3. Make sure the number of groups being sent is under the limit of the IDP. Some IDPs will return an error, while others will just omit the `groups` claim. A common solution is to create a filter on the identity provider that returns less than the limit for your IDP. +1. Ensure the user is a part of a group in the IDP. If the user has 0 groups, no + `groups` claim will be sent. +2. Check if another claim appears to be the correct claim with a different name. + A common name is `memberOf` instead of `groups`. If this is present, update + `CODER_OIDC_GROUP_FIELD=memberOf`. +3. Make sure the number of groups being sent is under the limit of the IDP. Some + IDPs will return an error, while others will just omit the `groups` claim. A + common solution is to create a filter on the identity provider that returns + less than the limit for your IDP. - [Azure AD limit is 200, and omits groups if exceeded.](https://learn.microsoft.com/en-us/azure/active-directory/hybrid/connect/how-to-connect-fed-group-claims#options-for-applications-to-consume-group-information) - [Okta limit is 100, and returns an error if exceeded.](https://developer.okta.com/docs/reference/api/oidc/#scope-dependent-claims-not-always-returned) ## Role sync (enterprise) If your OpenID Connect provider supports roles claims, you can configure Coder -to synchronize roles in your auth provider to deployment-wide roles within Coder. +to synchronize roles in your auth provider to deployment-wide roles within +Coder. Set the following in your Coder server [configuration](./configure.md). -```console +```env # Depending on your identity provider configuration, you may need to explicitly request a "roles" scope CODER_OIDC_SCOPES=openid,profile,email,roles @@ -352,7 +399,8 @@ CODER_OIDC_USER_ROLE_FIELD=roles CODER_OIDC_USER_ROLE_MAPPING='{"TemplateAuthor":["template-admin","user-admin"]}' ``` -> One role from your identity provider can be mapped to many roles in Coder (e.g. the example above maps to 2 roles in Coder.) 
+> One role from your identity provider can be mapped to many roles in Coder +> (e.g. the example above maps to 2 roles in Coder.) ## Provider-Specific Guides @@ -362,17 +410,20 @@ Below are some details specific to individual OIDC providers. > **Note:** Tested on ADFS 4.0, Windows Server 2019 -1. In your Federation Server, create a new application group for Coder. Follow the - steps as described [here.](https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/development/msal/adfs-msal-web-app-web-api#app-registration-in-ad-fs) +1. In your Federation Server, create a new application group for Coder. Follow + the steps as described + [here.](https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/development/msal/adfs-msal-web-app-web-api#app-registration-in-ad-fs) - **Server Application**: Note the Client ID. - **Configure Application Credentials**: Note the Client Secret. - **Configure Web API**: Set the Client ID as the relying party identifier. - - **Application Permissions**: Allow access to the claims `openid`, `email`, `profile`, and `allatclaims`. -1. Visit your ADFS server's `/.well-known/openid-configuration` URL and note - the value for `issuer`. - > **Note:** This is usually of the form `https://adfs.corp/adfs/.well-known/openid-configuration` -1. In Coder's configuration file (or Helm values as appropriate), set the following - environment variables or their corresponding CLI arguments: + - **Application Permissions**: Allow access to the claims `openid`, `email`, + `profile`, and `allatclaims`. +1. Visit your ADFS server's `/.well-known/openid-configuration` URL and note the + value for `issuer`. + > **Note:** This is usually of the form + > `https://adfs.corp/adfs/.well-known/openid-configuration` +1. In Coder's configuration file (or Helm values as appropriate), set the + following environment variables or their corresponding CLI arguments: - `CODER_OIDC_ISSUER_URL`: the `issuer` value from the previous step. - `CODER_OIDC_CLIENT_ID`: the Client ID from step 1. @@ -383,28 +434,44 @@ Below are some details specific to individual OIDC providers. {"resource":"$CLIENT_ID"} ``` - where `$CLIENT_ID` is the Client ID from step 1 ([see here](https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/overview/ad-fs-openid-connect-oauth-flows-scenarios#:~:text=scope%E2%80%AFopenid.-,resource,-optional)). - This is required for the upstream OIDC provider to return the requested claims. + where `$CLIENT_ID` is the Client ID from step 1 + ([see here](https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/overview/ad-fs-openid-connect-oauth-flows-scenarios#:~:text=scope%E2%80%AFopenid.-,resource,-optional)). + This is required for the upstream OIDC provider to return the requested + claims. - `CODER_OIDC_IGNORE_USERINFO`: Set to `true`. -1. Configure [Issuance Transform Rules](https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/operations/create-a-rule-to-send-ldap-attributes-as-claims) +1. Configure + [Issuance Transform Rules](https://learn.microsoft.com/en-us/windows-server/identity/ad-fs/operations/create-a-rule-to-send-ldap-attributes-as-claims) on your federation server to send the following claims: - `preferred_username`: You can use e.g. "Display Name" as required. - - `email`: You can use e.g. the LDAP attribute "E-Mail-Addresses" as required. + - `email`: You can use e.g. the LDAP attribute "E-Mail-Addresses" as + required. 
- `email_verified`: Create a custom claim rule: ```console => issue(Type = "email_verified", Value = "true") ``` - - (Optional) If using Group Sync, send the required groups in the configured groups claim field. See [here](https://stackoverflow.com/a/55570286) for an example. + - (Optional) If using Group Sync, send the required groups in the configured + groups claim field. See [here](https://stackoverflow.com/a/55570286) for an + example. ### Keycloak -The access_type parameter has two possible values: "online" and "offline." By default, the value is set to "offline". This means that when a user authenticates using OIDC, the application requests offline access to the user's resources, including the ability to refresh access tokens without requiring the user to reauthenticate. +The access_type parameter has two possible values: "online" and "offline." By +default, the value is set to "offline". This means that when a user +authenticates using OIDC, the application requests offline access to the user's +resources, including the ability to refresh access tokens without requiring the +user to reauthenticate. -To enable the `offline_access` scope, which allows for the refresh token functionality, you need to add it to the list of requested scopes during the authentication flow. Including the `offline_access` scope in the requested scopes ensures that the user is granted the necessary permissions to obtain refresh tokens. +To enable the `offline_access` scope, which allows for the refresh token +functionality, you need to add it to the list of requested scopes during the +authentication flow. Including the `offline_access` scope in the requested +scopes ensures that the user is granted the necessary permissions to obtain +refresh tokens. -By combining the `{"access_type":"offline"}` parameter in the OIDC Auth URL with the `offline_access` scope, you can achieve the desired behavior of obtaining refresh tokens for offline access to the user's resources. +By combining the `{"access_type":"offline"}` parameter in the OIDC Auth URL with +the `offline_access` scope, you can achieve the desired behavior of obtaining +refresh tokens for offline access to the user's resources. diff --git a/docs/admin/automation.md b/docs/admin/automation.md index 18751755b4..c9fc788330 100644 --- a/docs/admin/automation.md +++ b/docs/admin/automation.md @@ -1,6 +1,8 @@ # Automation -All actions possible through the Coder dashboard can also be automated as it utilizes the same public REST API. There are several ways to extend/automate Coder: +All actions possible through the Coder dashboard can also be automated as it +utilizes the same public REST API. There are several ways to extend/automate +Coder: - [CLI](../cli.md) - [REST API](../api/) @@ -10,13 +12,13 @@ All actions possible through the Coder dashboard can also be automated as it uti Generate a token on your Coder deployment by visiting: -```sh +```shell https://coder.example.com/settings/tokens ``` List your workspaces -```sh +```shell # CLI coder ls \ --url https://coder.example.com \ @@ -30,23 +32,34 @@ curl https://coder.example.com/api/v2/workspaces?q=owner:me \ ## Documentation -We publish an [API reference](../api/index.md) in our documentation. You can also enable a [Swagger endpoint](../cli/server.md#--swagger-enable) on your Coder deployment. +We publish an [API reference](../api/index.md) in our documentation. You can +also enable a [Swagger endpoint](../cli/server.md#--swagger-enable) on your +Coder deployment. 
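As a quick, self-contained illustration of the pattern the use cases below build on, the sketch here calls the same workspaces endpoint and `Coder-Session-Token` header shown in the earlier CLI/API examples. The deployment URL and token are placeholders, and `jq` is only used to pretty-print the response; treat this as a sketch rather than a prescribed workflow.

```shell
# Placeholders: substitute your deployment URL and a token from /settings/tokens
export CODER_URL=https://coder.example.com
export CODER_SESSION_TOKEN=xxxxxxxxxxxx

# List the workspaces you own; jq pretty-prints the JSON response
curl -fsSL "$CODER_URL/api/v2/workspaces?q=owner:me" \
  -H "Coder-Session-Token: $CODER_SESSION_TOKEN" | jq .
```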
## Use cases -We strive to keep the following use cases up to date, but please note that changes to API queries and routes can occur. For the most recent queries and payloads, we recommend checking the CLI and API documentation. +We strive to keep the following use cases up to date, but please note that +changes to API queries and routes can occur. For the most recent queries and +payloads, we recommend checking the CLI and API documentation. ### Templates -- [Update templates in CI](../templates/change-management.md): Store all templates and git and update templates in CI/CD pipelines. +- [Update templates in CI](../templates/change-management.md): Store all + templates and git and update templates in CI/CD pipelines. ### Workspace agents -Workspace agents have a special token that can send logs, metrics, and workspace activity. +Workspace agents have a special token that can send logs, metrics, and workspace +activity. -- [Custom workspace logs](../api/agents.md#patch-workspace-agent-logs): Expose messages prior to the Coder init script running (e.g. pulling image, VM starting, restoring snapshot). [coder-logstream-kube](https://github.com/coder/coder-logstream-kube) uses this to show Kubernetes events, such as image pulls or ResourceQuota restrictions. +- [Custom workspace logs](../api/agents.md#patch-workspace-agent-logs): Expose + messages prior to the Coder init script running (e.g. pulling image, VM + starting, restoring snapshot). + [coder-logstream-kube](https://github.com/coder/coder-logstream-kube) uses + this to show Kubernetes events, such as image pulls or ResourceQuota + restrictions. - ```sh + ```shell curl -X PATCH https://coder.example.com/api/v2/workspaceagents/me/logs \ -H "Coder-Session-Token: $CODER_AGENT_TOKEN" \ -d "{ @@ -60,9 +73,11 @@ Workspace agents have a special token that can send logs, metrics, and workspace }" ``` -- [Manually send workspace activity](../api/agents.md#submit-workspace-agent-stats): Keep a workspace "active," even if there is not an open connection (e.g. for a long-running machine learning job). +- [Manually send workspace activity](../api/agents.md#submit-workspace-agent-stats): + Keep a workspace "active," even if there is not an open connection (e.g. for a + long-running machine learning job). - ```sh + ```shell #!/bin/bash # Send workspace activity as long as the job is still running diff --git a/docs/admin/configure.md b/docs/admin/configure.md index 2240ef4ed5..17ce483cb2 100644 --- a/docs/admin/configure.md +++ b/docs/admin/configure.md @@ -1,23 +1,26 @@ -Coder server's primary configuration is done via environment variables. For a full list of the options, run `coder server --help` or see our [CLI documentation](../cli/server.md). +Coder server's primary configuration is done via environment variables. For a +full list of the options, run `coder server --help` or see our +[CLI documentation](../cli/server.md). ## Access URL -`CODER_ACCESS_URL` is required if you are not using the tunnel. Set this to the external URL -that users and workspaces use to connect to Coder (e.g. ). This -should not be localhost. +`CODER_ACCESS_URL` is required if you are not using the tunnel. Set this to the +external URL that users and workspaces use to connect to Coder (e.g. +). This should not be localhost. -> Access URL should be a external IP address or domain with DNS records pointing to Coder. +> Access URL should be a external IP address or domain with DNS records pointing +> to Coder. 
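For example, a minimal setup might export the access URL before starting the server. The domain below is a placeholder; use a DNS name (or external IP) that actually resolves to your Coder deployment.

```shell
# Placeholder domain: replace with a DNS record that points at Coder
export CODER_ACCESS_URL=https://coder.example.com

coder server
```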
### Tunnel -If an access URL is not specified, Coder will create -a publicly accessible URL to reverse proxy your deployment for simple setup. +If an access URL is not specified, Coder will create a publicly accessible URL +to reverse proxy your deployment for simple setup. ## Address You can change which port(s) Coder listens on. -```sh +```shell # Listen on port 80 export CODER_HTTP_ADDRESS=0.0.0.0:80 @@ -34,22 +37,27 @@ coder server ## Wildcard access URL -`CODER_WILDCARD_ACCESS_URL` is necessary for [port forwarding](../networking/port-forwarding.md#dashboard) -via the dashboard or running [coder_apps](../templates/index.md#coder-apps) on an absolute path. Set this to a wildcard -subdomain that resolves to Coder (e.g. `*.coder.example.com`). +`CODER_WILDCARD_ACCESS_URL` is necessary for +[port forwarding](../networking/port-forwarding.md#dashboard) via the dashboard +or running [coder_apps](../templates/index.md#coder-apps) on an absolute path. +Set this to a wildcard subdomain that resolves to Coder (e.g. +`*.coder.example.com`). If you are providing TLS certificates directly to the Coder server, either 1. Use a single certificate and key for both the root and wildcard domains. 2. Configure multiple certificates and keys via - [`coder.tls.secretNames`](https://github.com/coder/coder/blob/main/helm/coder/values.yaml) in the Helm Chart, or - [`--tls-cert-file`](../cli/server.md#--tls-cert-file) and [`--tls-key-file`](../cli/server.md#--tls-key-file) command - line options (these both take a comma separated list of files; list certificates and their respective keys in the - same order). + [`coder.tls.secretNames`](https://github.com/coder/coder/blob/main/helm/coder/values.yaml) + in the Helm Chart, or [`--tls-cert-file`](../cli/server.md#--tls-cert-file) + and [`--tls-key-file`](../cli/server.md#--tls-key-file) command line options + (these both take a comma separated list of files; list certificates and their + respective keys in the same order). ## TLS & Reverse Proxy -The Coder server can directly use TLS certificates with `CODER_TLS_ENABLE` and accompanying configuration flags. However, Coder can also run behind a reverse-proxy to terminate TLS certificates from LetsEncrypt, for example. +The Coder server can directly use TLS certificates with `CODER_TLS_ENABLE` and +accompanying configuration flags. However, Coder can also run behind a +reverse-proxy to terminate TLS certificates from LetsEncrypt, for example. - [Apache](https://github.com/coder/coder/tree/main/examples/web-server/apache) - [Caddy](https://github.com/coder/coder/tree/main/examples/web-server/caddy) @@ -57,17 +65,19 @@ The Coder server can directly use TLS certificates with `CODER_TLS_ENABLE` and a ### Kubernetes TLS configuration -Below are the steps to configure Coder to terminate TLS when running on Kubernetes. -You must have the certificate `.key` and `.crt` files in your working directory prior to step 1. +Below are the steps to configure Coder to terminate TLS when running on +Kubernetes. You must have the certificate `.key` and `.crt` files in your +working directory prior to step 1. 1. Create the TLS secret in your Kubernetes cluster -```console +```shell kubectl create secret tls coder-tls -n --key="tls.key" --cert="tls.crt" ``` -> You can use a single certificate for the both the access URL and wildcard access URL. -> The certificate CN must match the wildcard domain, such as `*.example.coder.com`. +> You can use a single certificate for the both the access URL and wildcard +> access URL. 
The certificate CN must match the wildcard domain, such as +> `*.example.coder.com`. 1. Reference the TLS secret in your Coder Helm chart values @@ -87,14 +97,16 @@ coder: ## PostgreSQL Database -Coder uses a PostgreSQL database to store users, workspace metadata, and other deployment information. -Use `CODER_PG_CONNECTION_URL` to set the database that Coder connects to. If unset, PostgreSQL binaries will be -downloaded from Maven () and store all data in the config root. +Coder uses a PostgreSQL database to store users, workspace metadata, and other +deployment information. Use `CODER_PG_CONNECTION_URL` to set the database that +Coder connects to. If unset, PostgreSQL binaries will be downloaded from Maven +() and store all data in the config root. > Postgres 13 is the minimum supported version. If you are using the built-in PostgreSQL deployment and need to use `psql` (aka -the PostgreSQL interactive terminal), output the connection URL with the following command: +the PostgreSQL interactive terminal), output the connection URL with the +following command: ```console coder server postgres-builtin-url @@ -103,21 +115,26 @@ psql "postgres://coder@localhost:49627/coder?sslmode=disable&password=feU...yI1" ### Migrating from the built-in database to an external database -To migrate from the built-in database to an external database, follow these steps: +To migrate from the built-in database to an external database, follow these +steps: 1. Stop your Coder deployment. 2. Run `coder server postgres-builtin-serve` in a background terminal. 3. Run `coder server postgres-builtin-url` and copy its output command. -4. Run `pg_dump > coder.sql` to dump the internal database to a file. -5. Restore that content to an external database with `psql < coder.sql`. -6. Start your Coder deployment with `CODER_PG_CONNECTION_URL=`. +4. Run `pg_dump > coder.sql` to dump the internal + database to a file. +5. Restore that content to an external database with + `psql < coder.sql`. +6. Start your Coder deployment with + `CODER_PG_CONNECTION_URL=`. ## System packages -If you've installed Coder via a [system package](../install/packages.md) Coder, you can -configure the server by setting the following variables in `/etc/coder.d/coder.env`: +If you've installed Coder via a [system package](../install/packages.md) Coder, +you can configure the server by setting the following variables in +`/etc/coder.d/coder.env`: -```console +```env # String. Specifies the external URL (HTTP/S) to access Coder. CODER_ACCESS_URL=https://coder.example.com @@ -145,7 +162,7 @@ CODER_TLS_KEY_FILE= To run Coder as a system service on the host: -```console +```shell # Use systemd to start Coder now and on reboot sudo systemctl enable --now coder @@ -155,15 +172,15 @@ journalctl -u coder.service -b To restart Coder after applying system changes: -```console +```shell sudo systemctl restart coder ``` ## Configuring Coder behind a proxy -To configure Coder behind a corporate proxy, set the environment variables `HTTP_PROXY` and -`HTTPS_PROXY`. Be sure to restart the server. Lowercase values (e.g. `http_proxy`) are also -respected in this case. +To configure Coder behind a corporate proxy, set the environment variables +`HTTP_PROXY` and `HTTPS_PROXY`. Be sure to restart the server. Lowercase values +(e.g. `http_proxy`) are also respected in this case. 
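For instance, on a system-package install the proxy variables can live in the same `/etc/coder.d/coder.env` file described above. The proxy address below is a placeholder for illustration.

```env
# Placeholder proxy address: replace with your corporate proxy
HTTP_PROXY=http://proxy.internal.example.com:3128
HTTPS_PROXY=http://proxy.internal.example.com:3128
```

After editing the file, restart the service (e.g. `sudo systemctl restart coder`) so the new values take effect.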
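Returning to the database migration steps earlier on this page, the sketch below strings those commands together. The built-in connection URL follows the shape printed by `coder server postgres-builtin-url` (password elided), and the external database URL is purely a placeholder; substitute your own values.

```shell
# 1. In a separate terminal, serve the built-in database
coder server postgres-builtin-serve

# 2. Print the built-in connection URL, then dump its contents
coder server postgres-builtin-url
pg_dump "postgres://coder@localhost:49627/coder?sslmode=disable&password=..." > coder.sql

# 3. Restore into the external database (placeholder URL) and point Coder at it
psql "postgres://coder:password@db.example.com:5432/coder" < coder.sql
export CODER_PG_CONNECTION_URL="postgres://coder:password@db.example.com:5432/coder"
```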
## Up Next diff --git a/docs/admin/git-providers.md b/docs/admin/git-providers.md index 293c88ab3c..0cbd0e00c9 100644 --- a/docs/admin/git-providers.md +++ b/docs/admin/git-providers.md @@ -1,10 +1,13 @@ # Git Providers -Coder integrates with git providers to automate away the need for developers to authenticate with repositories within their workspace. +Coder integrates with git providers to automate away the need for developers to +authenticate with repositories within their workspace. ## How it works -When developers use `git` inside their workspace, they are prompted to authenticate. After that, Coder will store and refresh tokens for future operations. +When developers use `git` inside their workspace, they are prompted to +authenticate. After that, Coder will store and refresh tokens for future +operations.
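As a rough sketch of what this looks like in practice, a GitHub provider entry is configured on the Coder server with a client ID and secret from an OAuth app. The variable names below assume the `CODER_GITAUTH_0_*` configuration family; verify the exact names against your Coder version's documentation, and treat all values as placeholders.

```env
# Sketch only: assumes the CODER_GITAUTH_0_* configuration family.
# The ID is a friendly name; the client ID/secret are placeholders from an OAuth app.
CODER_GITAUTH_0_ID=primary-github
CODER_GITAUTH_0_TYPE=github
CODER_GITAUTH_0_CLIENT_ID=xxxxxxxx
CODER_GITAUTH_0_CLIENT_SECRET=xxxxxxxxxx
```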