Compare commits
169 Commits
xteve-2.0. ... intel-gpu-
| Author | SHA1 | Date |
|---|---|---|
| | ef5f8b3451 | |
| | c516c4e05d | |
| | 6104d0bc80 | |
| | 12752f4018 | |
| | 010c3dbf06 | |
| | 2dba5d9d8e | |
| | 1a3f1ad996 | |
| | f00fef03b6 | |
| | 23dc0d5b14 | |
| | bae95225cf | |
| | d90ddd9e2b | |
| | c058d4adc0 | |
| | b2f1661912 | |
| | 7e56103498 | |
| | d3755f1a04 | |
| | b237726244 | |
| | 90a691aea0 | |
| | 3ee806f563 | |
| | b8ae055561 | |
| | 8c4f2de8c4 | |
| | 374b098436 | |
| | e63dbd08d1 | |
| | 2c15d6617c | |
| | aca509001d | |
| | 053d99b0fe | |
| | cf9a32053d | |
| | d59192d04c | |
| | 47c37d6ad8 | |
| | 571372c4c7 | |
| | e3e42eea31 | |
| | eaf3b47fec | |
| | 8e8e8afd50 | |
| | 1ec31ad877 | |
| | 628e2f6842 | |
| | 85c7673e31 | |
| | 53ca0dfafd | |
| | ade40a1e9d | |
| | bc2742e655 | |
| | fb1c653533 | |
| | 9e284da7a6 | |
| | 6929543b6f | |
| | 979349b96f | |
| | 521d473cc0 | |
| | 00f3ce5523 | |
| | f7e980ab9c | |
| | a037936b3e | |
| | 63b87146a3 | |
| | a5b55b33e4 | |
| | 2eedb285e8 | |
| | 54d5f5aaeb | |
| | f8babcb5a2 | |
| | f15926425f | |
| | b6ec5f8e71 | |
| | 8158841f31 | |
| | ff58303989 | |
| | 614f2bd25f | |
| | ca2c348e6d | |
| | 5bff2ae5ed | |
| | 1bd47c326a | |
| | 7d06c3d5e3 | |
| | 451d0510c2 | |
| | cd06a6fb61 | |
| | 1eb548d382 | |
| | befa7553fa | |
| | b629ecc876 | |
| | 2676dbded2 | |
| | 7e92803f87 | |
| | 10cfeb8bd1 | |
| | 4f99bc67fb | |
| | 6d5c992852 | |
| | 75fd9f4e6d | |
| | da9bea90b3 | |
| | 3b06c431b0 | |
| | b899548da9 | |
| | 74845ca08e | |
| | 3a40f65b46 | |
| | 43392e1e7a | |
| | d3406d1f39 | |
| | db24d009cc | |
| | b94814d3d7 | |
| | 3070528d2f | |
| | de73201b2b | |
| | ba4e6b978c | |
| | 48df925051 | |
| | 2ecc70f1df | |
| | 5c35aa1a1d | |
| | 12853f3b9a | |
| | 31959e5e37 | |
| | a75a6cef77 | |
| | ac68205d8b | |
| | 66d5bd7193 | |
| | c40bdfeff7 | |
| | 04478fd52f | |
| | ab4fd1b1e0 | |
| | aec35fe08f | |
| | 16828ba415 | |
| | 2a3f676426 | |
| | 1b1898809b | |
| | 1dff5670d8 | |
| | e5b78c7314 | |
| | c5b81a263f | |
| | e7e4665389 | |
| | 990ba59dfa | |
| | 0ed3ecbb48 | |
| | 480fa5a7d3 | |
| | 1f6050759b | |
| | 0f37c8776d | |
| | 5451ce26ab | |
| | 107e53d3b7 | |
| | f4855955cf | |
| | a5694ab9d9 | |
| | 2508a42660 | |
| | cf4c0ba997 | |
| | 5705371a35 | |
| | 76c5160e37 | |
| | a26921bba5 | |
| | c67e3df333 | |
| | 97f18a033c | |
| | 22017632bc | |
| | c991d11bce | |
| | 561a0f25bb | |
| | e0f64a26f2 | |
| | 8999baca25 | |
| | 90daf5bcf1 | |
| | 1746270044 | |
| | 55313d0be2 | |
| | 153620272e | |
| | 8a7fe72ea6 | |
| | ca6493faf3 | |
| | 576ff487df | |
| | 65abab892e | |
| | 50ce4d6bde | |
| | c69cc6751f | |
| | 995ef7ef2b | |
| | 6a3b129a4b | |
| | 6c8d01add3 | |
| | 9798bb82cc | |
| | 0322acc6fe | |
| | 0a221f5297 | |
| | ab941ae48d | |
| | a078da5499 | |
| | c2df150921 | |
| | d28bf3fecf | |
| | 7f3bc53d12 | |
| | 652612e76b | |
| | 93addda234 | |
| | 08f9adbd73 | |
| | 73956c3eed | |
| | 609b2dbe31 | |
| | ac0202a0c4 | |
| | 1a67cf9070 | |
| | 4cbe828448 | |
| | be82a0fccb | |
| | d1fbb47709 | |
| | 214dd6eaac | |
| | 3f50bc7f61 | |
| | 10348d1c0b | |
| | 062db282ed | |
| | 457a149637 | |
| | 1b9cfcfb80 | |
| | 23a666b18b | |
| | 66a943c448 | |
| | 8c958cbadb | |
| | ba63649c59 | |
| | d149fb6bd7 | |
| | f5241bde3a | |
| | 5899c0002c | |
| | 2182e215f2 | |
| | eeda505585 | |
.github/workflows/lint-test.yaml (14 changes, vendored)
@@ -1,30 +1,28 @@
name: Lint and Test Charts

on: pull_request

jobs:
lint-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Fetch history
run: git fetch --prune --unshallow

run: |
git fetch --prune --unshallow;
echo ::set-env name=commitmsg::$(git log --format=%B -n 1 ${{ github.event.after }})
- name: Run chart-testing (lint)
id: lint
uses: helm/chart-testing-action@v1.0.0
if: "! contains(env.commitmsg, '[skip lint]')"
with:
command: lint
config: ct.yaml

- name: Create kind cluster
uses: helm/kind-action@v1.0.0
if: steps.lint.outputs.changed == 'true'

if: "steps.lint.outputs.changed == 'true' && ! contains(env.commitmsg, '[skip install]')"
- name: Run chart-testing (install)
uses: helm/chart-testing-action@v1.0.0
if: "steps.lint.outputs.changed == 'true' && ! contains(env.commitmsg, '[skip install]')"
with:
command: install
config: ct.yaml
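The lint and install jobs key off the head commit message, so a contributor can skip either step by including the corresponding token. A minimal sketch (the message text is illustrative; only the `[skip lint]` / `[skip install]` tokens come from the workflow above):

```console
# skip the kind-cluster install job for a docs-only change
$ git commit -m "fix README typo [skip install]"
```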
.gitignore (3 changes, vendored)
@@ -1 +1,4 @@
.env
.idea
charts/*/Chart.lock
charts/*/charts

.gitmodules (0 changes, vendored)
.pre-commit-config.yaml (13 changes, new file)
@@ -0,0 +1,13 @@
# See https://pre-commit.com for more information
repos:
- repo: local
hooks:
- id: ct-lint
name: "Chart Test: Lint"
language: docker_image
pass_filenames: false
types: ['file']
files: '^charts/.*(\.ya?ml|\.tpl|\.helmignore|NOTES.txt)'
entry: -u 0 quay.io/helmpack/chart-testing:v3.0.0 ct
args:
- lint
@@ -33,9 +33,9 @@ See `git help commit`:

### Technical Requirements

* Must follow [Charts best practices](https://helm.sh/docs/topics/chart_best_practices/)
* Must pass CI jobs for linting and installing changed charts with the [chart-testing](https://github.com/helm/chart-testing) tool
* Any change to a chart requires a version bump following [semver](https://semver.org/) principles. See [Immutability(#immutability) and [Versioning](#versioning) below
* Must follow [Charts best practices](https://helm.sh/docs/topics/chart_best_practices/).
* Must pass CI jobs for linting and installing changed charts with the [chart-testing](https://github.com/helm/chart-testing) tool See [pre-commit](#pre-commit) below.
* Any change to a chart requires a version bump following [semver](https://semver.org/) principles. See [Immutability](#immutability) and [Versioning](#versioning) below.

Once changes have been merged, the release job will automatically run to package and release changed charts.

@@ -51,3 +51,7 @@ Charts should start at `1.0.0`. Any breaking (backwards incompatible) changes to

1. Bump the MAJOR version
2. In the README, under a section called "Upgrading", describe the manual steps necessary to upgrade to the new (specified) MAJOR version

### pre-commit

This repo supports the [pre-commit](https://pre-commit.com) framework. By installing the framework (see [docs](https://pre-commit.com/#install)) it is possible to perform the chart linting step before committing your code. This can help prevent linter issues in the pipeline. Note that this requires having Docker running on your development environment.
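Roughly, that workflow looks like the following sketch (`pip` is just one of the documented ways to install the framework; the `ct-lint` hook id comes from the `.pre-commit-config.yaml` added above):

```console
$ pip install pre-commit
$ pre-commit install                   # register the git hook in this clone
$ pre-commit run ct-lint --all-files   # run the chart-testing lint hook on demand
```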
@@ -2,7 +2,8 @@

[](https://opensource.org/licenses/Apache-2.0)
[](https://github.com/k8s-at-home/charts/actions)

[](https://github.com/pre-commit/pre-commit)
[](https://artifacthub.io/packages/search?repo=k8s-at-home)
## Usage

[Helm](https://helm.sh) must be installed to use the charts.
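Once Helm is available, usage follows the usual repo-add/install pattern. A minimal sketch (the release and chart names are illustrative; the repository URL is the one defined in the README templates further down):

```console
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm repo update
$ helm install bazarr k8s-at-home/bazarr
```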
charts/README.templates.md.gotmpl (72 changes, new file)
@@ -0,0 +1,72 @@
{{- define "repository.organization" -}}
k8s-at-home
{{- end -}}

{{- define "repository.url" -}}
https://github.com/k8s-at-home/charts
{{- end -}}

{{- define "helm.url" -}}
https://k8s-at-home.com/charts/
{{- end -}}

{{- define "helm.path" -}}
{{ template "repository.organization" . }}/{{ template "chart.name" . }}
{{- end -}}
{{- define "badge.artifactHub" -}}
[](https://artifacthub.io/packages/helm/{{ template "chart.name" . }})
{{- end -}}
{{- define "description.multiarch" -}}
The default values and container images used in this chart will allow for running in a multi-arch cluster (amd64, arm, arm64)
{{- end -}}

{{- define "install.tldr" -}}
## TL;DR
```console
$ helm repo add {{ template "repository.organization" . }} {{ template "helm.url" . }}
$ helm install {{ template "helm.path" . }}
```
{{- end -}}

{{- define "install" -}}
## Installing the Chart
To install the chart with the release name `{{ template "chart.name" . }}`:
```console
helm install {{ template "chart.name" . }} {{ template "helm.path" . }}
```
{{- end -}}

{{- define "uninstall" -}}
## Uninstalling the Chart
To uninstall the `{{ template "chart.name" . }}` deployment:
```console
helm uninstall {{ template "chart.name" . }}
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
{{- end -}}

{{- define "configuration.header" -}}
## Configuration
{{- end -}}

{{- define "configuration.readValues" -}}
Read through the [values.yaml]({{ template "repository.url" . }}/blob/master/charts/{{ template "chart.name" . }}/values.yaml)
file. It has several commented out suggested values.
{{- end -}}

{{- define "configuration.example.set" -}}
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install {{ template "chart.name" . }} \
--set env.TZ="America/New York" \
{{ template "helm.path" . }}
```
{{- end -}}

{{- define "configuration.example.file" -}}
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart.
For example,
```console
helm install {{ template "chart.name" . }} {{ template "helm.path" . }} --values values.yaml
```
{{- end -}}
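These definitions are shared blocks meant to be pulled into per-chart READMEs. A hypothetical `charts/<name>/README.md.gotmpl` consuming them might look like this (a sketch; only the template names defined above, plus `chart.name`, are assumed, and the exact documentation tooling is not shown in this diff):

```gotmpl
# {{ template "chart.name" . }}

{{ template "badge.artifactHub" . }}

{{ template "description.multiarch" . }}

{{ template "install.tldr" . }}
{{ template "install" . }}
{{ template "uninstall" . }}

{{ template "configuration.header" . }}
{{ template "configuration.readValues" . }}
{{ template "configuration.example.set" . }}
{{ template "configuration.example.file" . }}
```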
@@ -1,8 +1,8 @@
apiVersion: v2
appVersion: v0.8.4.2-ls72
appVersion: v0.9.0.2
description: Bazarr is a companion application to Sonarr and Radarr. It manages and downloads subtitles based on your requirements
name: bazarr
version: 3.0.0
version: 3.1.0
keywords:
- bazarr
- radarr
@@ -75,6 +75,7 @@ The following tables lists the configurable parameters of the Sentry chart and t
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.config.subpath` | Select a subpath in the PVC | `nil` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
@@ -64,6 +64,9 @@ spec:
volumeMounts:
- mountPath: /config
name: config
{{- if .Values.persistence.config.subPath }}
subPath: {{ .Values.persistence.config.subPath }}
{{- end }}
- mountPath: /media
name: media
{{- if .Values.persistence.media.subPath }}
@@ -4,7 +4,7 @@

image:
repository: linuxserver/bazarr
tag: v0.8.4.2-ls72
tag: v0.9.0.2-ls89
pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)

@@ -78,6 +78,7 @@ persistence:
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
# subPath: some-subpath
accessMode: ReadWriteOnce
size: 1Gi
## Do not delete the pvc upon helm uninstall
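A sketch of a values override wiring the new `subPath` option together with an existing claim (the claim name and subpath are placeholders):

```yaml
persistence:
  config:
    enabled: true
    existingClaim: bazarr-config   # reuse a PVC created outside the chart
    subPath: bazarr                # mount only this directory of the claim
```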
@@ -1,8 +1,8 @@
apiVersion: v2
appVersion: "v0.8"
appVersion: v0.10
description: DNS proxy as ad-blocker for local network
name: blocky
version: 4.0.0
version: 4.0.1
keywords:
- blocky
- dbs
@@ -2,7 +2,7 @@ replicaCount: 1

image:
repository: spx01/blocky
tag: v0.8
tag: v0.10
pullPolicy: IfNotPresent

nameOverride: ""
@@ -1,16 +1,9 @@
type: application

apiVersion: v2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.6.8-ls74
appVersion: 0.6.8
description: Calibre-Web is a web app providing a clean interface for browsing, reading and downloading eBooks using an existing Calibre database.
name: calibre-web
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 2.0.0
version: 2.0.1
keywords:
- calibre
- ebook
@@ -1,8 +1,8 @@
apiVersion: v2
appVersion: 7260c12f-ls33
appVersion: v3.0.1
description: couchpotato is a movie downloading client
name: couchpotato
version: 2.0.0
version: 2.0.1
keywords:
- couchpotato
- usenet
@@ -4,7 +4,7 @@

image:
repository: linuxserver/couchpotato
tag: 7260c12f-ls33
tag: 7260c12f-ls42
pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
charts/dashmachine/Chart.yaml (12 changes, new file)
@@ -0,0 +1,12 @@
apiVersion: v2
appVersion: v0.5-4
description: DashMachine is another web application bookmark dashboard, with fun features.
icon: https://github.com/rmountjoy92/DashMachine/raw/master/dashmachine/static/images/logo/logo.png
home: https://github.com/rmountjoy92/DashMachine
name: dashmachine
version: 1.0.0
sources:
- https://github.com/rmountjoy92/DashMachine
maintainers:
- name: carpenike
email: ryan@ryanholt.net
charts/dashmachine/README.md (31 changes, new file)
@@ -0,0 +1,31 @@
dashmachine
===========
DashMachine is another web application bookmark dashboard, with fun features.

## Chart Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| deploymentAnnotations | object | `{}` | |
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"rmountjoy/dashmachine"` | |
| image.tag | string | `"latest"` | |
| ingress.annotations | object | `{}` | |
| ingress.enabled | bool | `false` | |
| ingress.hosts[0] | string | `"chart-example.local"` | |
| ingress.paths[0] | string | `"/"` | |
| ingress.tls | list | `[]` | |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| persistence.accessModes[0] | string | `"ReadWriteOnce"` | |
| persistence.enabled | bool | `false` | |
| persistence.size | string | `"1Gi"` | |
| persistence.storageClassName | string | `""` | |
| podAnnotations | object | `{}` | |
| replicaCount | int | `1` | |
| resources | object | `{}` | |
| service.port | int | `5000` | |
| service.type | string | `"ClusterIP"` | |
| tolerations | list | `[]` | |
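A sketch of a values override enabling the ingress and persistence options listed above (the hostname is a placeholder; note that `ingress.paths` is a list):

```yaml
ingress:
  enabled: true
  hosts:
    - dash.example.com
  paths:
    - /
persistence:
  enabled: true
  accessModes:
    - ReadWriteOnce
  size: 1Gi
```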
@@ -1,19 +1,21 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- range $host := .Values.ingress.hosts }}
{{- range $.Values.ingress.paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "sonarr.fullname" . }})
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "dashmachine.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get svc -w {{ include "sonarr.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "sonarr.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
You can watch the status of by running 'kubectl get svc -w {{ include "dashmachine.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "dashmachine.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "sonarr.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "dashmachine.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}
@@ -2,7 +2,7 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "lidarr.name" -}}
{{- define "dashmachine.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

@@ -11,7 +11,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "lidarr.fullname" -}}
{{- define "dashmachine.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}

@@ -27,6 +27,6 @@ If release name contains chart name it will be used as a full name.
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "lidarr.chart" -}}
{{- define "dashmachine.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
78
charts/dashmachine/templates/deployment.yaml
Normal file
78
charts/dashmachine/templates/deployment.yaml
Normal file
@@ -0,0 +1,78 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "dashmachine.fullname" . }}
|
||||
{{- if .Values.deploymentAnnotations }}
|
||||
annotations:
|
||||
{{- range $key, $value := .Values.deploymentAnnotations }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dashmachine.name" . }}
|
||||
helm.sh/chart: {{ include "dashmachine.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "dashmachine.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dashmachine.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- if .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- range $key, $value := .Values.podAnnotations }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.dnsConfig }}
|
||||
dnsConfig:
|
||||
{{- toYaml .Values.dnsConfig | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 5000
|
||||
protocol: TCP
|
||||
# livenessProbe:
|
||||
# httpGet:
|
||||
# path: /notifications
|
||||
# port: http
|
||||
# readinessProbe:
|
||||
# httpGet:
|
||||
# path: /notifications
|
||||
# port: http
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /dashmachine/dashmachine/user_data
|
||||
volumes:
|
||||
- name: config
|
||||
{{- if .Values.persistence.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ template "dashmachine.fullname" . }}
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
@@ -1,22 +1,19 @@
|
||||
{{- if .Values.ingress.enabled -}}
|
||||
{{- $fullName := include "ombi.fullname" . -}}
|
||||
{{- $ingressPath := .Values.ingress.path -}}
|
||||
{{- $fullName := include "dashmachine.fullname" . -}}
|
||||
{{- $ingressPaths := .Values.ingress.paths -}}
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "ombi.name" . }}
|
||||
helm.sh/chart: {{ include "ombi.chart" . }}
|
||||
app.kubernetes.io/name: {{ include "dashmachine.name" . }}
|
||||
helm.sh/chart: {{ include "dashmachine.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- with .Values.ingress.labels -}}
|
||||
{{ toYaml . | nindent 4 }}
|
||||
{{- end -}}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{ toYaml . | indent 4 }}
|
||||
{{- end }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
@@ -33,9 +30,11 @@ spec:
|
||||
- host: {{ . | quote }}
|
||||
http:
|
||||
paths:
|
||||
- path: {{ $ingressPath }}
|
||||
{{- range $ingressPaths }}
|
||||
- path: {{ . }}
|
||||
backend:
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: http
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
charts/dashmachine/templates/pvc.yaml (24 changes, new file)
@@ -0,0 +1,24 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ template "dashmachine.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "dashmachine.name" . }}
helm.sh/chart: {{ include "dashmachine.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.persistence.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
storageClassName: {{ .Values.persistence.storageClass }}
{{- end -}}
charts/dashmachine/templates/service.yaml (19 changes, new file)
@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "dashmachine.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "dashmachine.name" . }}
helm.sh/chart: {{ include "dashmachine.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: {{ include "dashmachine.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
charts/dashmachine/templates/tests/test-connection.yaml (18 changes, new file)
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "dashmachine.fullname" . }}-test-connection"
labels:
app.kubernetes.io/name: {{ include "dashmachine.name" . }}
helm.sh/chart: {{ include "dashmachine.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "dashmachine.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never
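Because the pod carries the `helm.sh/hook: test-success` annotation, it only runs when the test suite is invoked. A sketch of exercising it after an install (the release and repo names are illustrative):

```console
$ helm install dashmachine k8s-at-home/dashmachine
$ helm test dashmachine    # launches the wget pod against the service port
```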
65
charts/dashmachine/values.yaml
Normal file
65
charts/dashmachine/values.yaml
Normal file
@@ -0,0 +1,65 @@
|
||||
# Default values for dashmachine.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: rmountjoy/dashmachine
|
||||
tag: v0.5-4
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 5000
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
paths: ["/"]
|
||||
hosts:
|
||||
- chart-example.local
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
persistence:
|
||||
enabled: false
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
##
|
||||
storageClass: ""
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
size: 1Gi
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
deploymentAnnotations: {}
|
||||
@@ -1,8 +1,8 @@
apiVersion: v2
appVersion: "1.0"
appVersion: 1.0
description: Dynamic DNS using DigitalOcean's DNS Services
name: digitalocean-dyndns
version: 2.0.0
version: 2.0.1
keywords:
- digitalocean
- dynamicdns
@@ -1,8 +1,8 @@
apiVersion: v2
appVersion: v2.0.5.1-2.0.5.1_beta_2020-01-18-ls58
appVersion: v2.0.5.1
description: Store securely encrypted backups on cloud storage services!
name: duplicati
version: 2.0.0
version: 2.0.1
keywords:
- duplicati
home: https://github.com/k8s-at-home/charts/tree/master/charts/duplicati
@@ -4,7 +4,7 @@

image:
repository: linuxserver/duplicati
tag: v2.0.5.1-2.0.5.1_beta_2020-01-18-ls58
tag: v2.0.5.1-2.0.5.1_beta_2020-01-18-ls72
pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
@@ -1,8 +1,8 @@
apiVersion: v2
appVersion: 1.14.5
appVersion: 1.15.2
description: ESPHome
name: esphome
version: 2.0.0
version: 2.2.0
keywords:
- esphome
home: https://github.com/k8s-at-home/charts/tree/master/charts/esphome
@@ -34,65 +34,72 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
|
||||
The following tables lists the configurable parameters of the ESPHome chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|----------------------------|-------------------------------------|---------------------------------------------------------|
|
||||
| `image.repository` | Image repository | `esphome/esphome` |
|
||||
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/esphome/esphome/tags/).| `0.14.5`|
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | Secrets to use when pulling the image | `[]` |
|
||||
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
|
||||
| `probes.liveness.enabled` | Use the livenessProbe? | `true` |
|
||||
| `probes.liveness.scheme ` | Specify liveness `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `probes.readiness.enabled` | Use the readinessProbe? | `true` |
|
||||
| `probes.readiness.scheme ` | Specify readiness `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `probes.startup.enabled` | Use the startupProbe? (new in kubernetes 1.16) | `false` |
|
||||
| `probes.startup.scheme ` | Specify startup `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.startup.failureThreshold` | Specify startup `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.startup.periodSeconds` | Specify startup `periodSeconds` parameter for the deployment | `10` |
|
||||
| `service.type` | Kubernetes service type for the esphome GUI | `ClusterIP` |
|
||||
| `service.port` | Kubernetes port where the esphome GUI is exposed| `6052` |
|
||||
| `service.portName` | Kubernetes port name where the esphome GUI is exposed | `api` |
|
||||
| `service.additionalPorts` | Add additional ports exposed by the esphome container integrations. Example homematic needs to expose a proxy port | `{}` |
|
||||
| `service.annotations` | Service annotations for the esphome GUI | `{}` |
|
||||
| `service.clusterIP` | Cluster IP for the esphom GUI | `` |
|
||||
| `service.externalIPs` | External IPs for the esphome GUI | `[]` |
|
||||
| `service.loadBalancerIP` | Loadbalancer IP for the esphome GUI | `` |
|
||||
| `service.loadBalancerSourceRanges` | Loadbalancer client IP restriction range for the esphome GUI | `[]` |
|
||||
| `service.publishNotReadyAddresses` | Set to true if the notReadyAddresses should be published | `false` |
|
||||
| `service.externalTrafficPolicy` | Loadbalancer externalTrafficPolicy | `` |
|
||||
| `hostNetwork` | Enable hostNetwork - might be needed for discovery to work | `false` |
|
||||
| `service.nodePort` | nodePort to listen on for the esphome GUI | `` |
|
||||
| `ingress.enabled` | Enables Ingress | `false` |
|
||||
| `ingress.annotations` | Ingress annotations | `{}` |
|
||||
| `ingress.path` | Ingress path | `/` |
|
||||
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
|
||||
| `ingress.tls` | Ingress TLS configuration | `[]` |
|
||||
| `persistence.enabled` | Use persistent volume to store data | `true` |
|
||||
| `persistence.size` | Size of persistent volume claim | `5Gi` |
|
||||
| `persistence.existingClaim`| Use an existing PVC to persist data | `nil` |
|
||||
| `persistence.hostPath`| The path to the config directory on the host, instead of a PVC | `nil` |
|
||||
| `persistence.storageClass` | Type of persistent volume claim | `-` |
|
||||
| `persistence.accessMode` | Persistence access modes | `ReadWriteMany` |
|
||||
| `hostMounts` | Array of host directories to mount; can be used for devices | [] |
|
||||
| `hostMounts.name` | Name of the volume | `nil` |
|
||||
| `hostMounts.hostPath` | The path on the host machine | `nil` |
|
||||
| `hostMounts.mountPath` | The path at which to mount (optional; assumed same as hostPath) | `nil` |
|
||||
| `hostMounts.type` | The type to mount (optional, i.e., `Directory`) | `nil` |
|
||||
| `extraEnv` | Extra ENV vars to pass to the esphome container | `{}` |
|
||||
| `extraEnvSecrets` | Extra env vars to pass to the esphome container from k8s secrets - see `values.yaml` for an example | `{}` |
|
||||
| `resources` | CPU/Memory resource requests/limits or the esphome GUI | `{}` |
|
||||
| `nodeSelector` | Node labels for pod assignment or the esphome GUI | `{}` |
|
||||
| `tolerations` | Toleration labels for pod assignment or the esphome GUI | `[]` |
|
||||
| `affinity` | Affinity settings for pod assignment or the esphome GUI | `{}` |
|
||||
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
|
||||
| `extraVolumes` | Any extra volumes to define for the pod | `{}` |
|
||||
| `extraVolumeMounts` | Any extra volumes mounts to define for each container of the pod | `{}` |
|
||||
| Parameter | Description | Default |
|
||||
|----------------------------------------|--------------------------------------------------------------------------------------------------------------------|-----------------------|
|
||||
| `image.repository` | Image repository | `esphome/esphome` |
|
||||
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/esphome/esphome/tags/). | `0.14.5` |
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | Secrets to use when pulling the image | `[]` |
|
||||
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
|
||||
| `probes.liveness.enabled` | Use the livenessProbe? | `true` |
|
||||
| `probes.liveness.scheme ` | Specify liveness `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `probes.readiness.enabled` | Use the readinessProbe? | `true` |
|
||||
| `probes.readiness.scheme ` | Specify readiness `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `probes.startup.enabled` | Use the startupProbe? (new in kubernetes 1.16) | `false` |
|
||||
| `probes.startup.scheme ` | Specify startup `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.startup.failureThreshold` | Specify startup `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.startup.periodSeconds` | Specify startup `periodSeconds` parameter for the deployment | `10` |
|
||||
| `service.type` | Kubernetes service type for the esphome GUI | `ClusterIP` |
|
||||
| `service.port` | Kubernetes port where the esphome GUI is exposed | `6052` |
|
||||
| `service.portName` | Kubernetes port name where the esphome GUI is exposed | `api` |
|
||||
| `service.additionalPorts` | Add additional ports exposed by the esphome container integrations. Example homematic needs to expose a proxy port | `{}` |
|
||||
| `service.annotations` | Service annotations for the esphome GUI | `{}` |
|
||||
| `service.clusterIP` | Cluster IP for the esphom GUI | `` |
|
||||
| `service.externalIPs` | External IPs for the esphome GUI | `[]` |
|
||||
| `service.loadBalancerIP` | Loadbalancer IP for the esphome GUI | `` |
|
||||
| `service.loadBalancerSourceRanges` | Loadbalancer client IP restriction range for the esphome GUI | `[]` |
|
||||
| `service.publishNotReadyAddresses` | Set to true if the notReadyAddresses should be published | `false` |
|
||||
| `service.externalTrafficPolicy` | Loadbalancer externalTrafficPolicy | `` |
|
||||
| `hostNetwork` | Enable hostNetwork - might be needed for discovery to work | `false` |
|
||||
| `service.nodePort` | nodePort to listen on for the esphome GUI | `` |
|
||||
| `ingress.enabled` | Enables Ingress | `false` |
|
||||
| `ingress.annotations` | Ingress annotations | `{}` |
|
||||
| `ingress.path` | Ingress path | `/` |
|
||||
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
|
||||
| `ingress.tls` | Ingress TLS configuration | `[]` |
|
||||
| `persistence.enabled` | Use persistent volume to store data | `true` |
|
||||
| `persistence.size` | Size of persistent volume claim | `5Gi` |
|
||||
| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
|
||||
| `persistence.hostPath` | The path to the config directory on the host, instead of a PVC | `nil` |
|
||||
| `persistence.storageClass` | Type of persistent volume claim | `-` |
|
||||
| `persistence.accessMode` | Persistence access modes | `ReadWriteMany` |
|
||||
| `git.enabled` | Use git-sync in init container | `false` |
|
||||
| `git.secret` | Git secret to use for git-sync | `git-creds` |
|
||||
| `git.syncPath` | Git sync path | `/config` |
|
||||
| `git.keyPath` | Git ssh key path | `/root/.ssh` |
|
||||
| `git.user.name` | Human-readable name in the “committer” and “author” fields | `` |
|
||||
| `git.user.email` | Email address for the “committer” and “author” fields | `` |
|
||||
| `hostMounts` | Array of host directories to mount; can be used for devices | [] |
|
||||
| `hostMounts.name` | Name of the volume | `nil` |
|
||||
| `hostMounts.hostPath` | The path on the host machine | `nil` |
|
||||
| `hostMounts.mountPath` | The path at which to mount (optional; assumed same as hostPath) | `nil` |
|
||||
| `hostMounts.type` | The type to mount (optional, i.e., `Directory`) | `nil` |
|
||||
| `extraEnv` | Extra ENV vars to pass to the esphome container | `{}` |
|
||||
| `extraEnvSecrets` | Extra env vars to pass to the esphome container from k8s secrets - see `values.yaml` for an example | `{}` |
|
||||
| `resources` | CPU/Memory resource requests/limits or the esphome GUI | `{}` |
|
||||
| `nodeSelector` | Node labels for pod assignment or the esphome GUI | `{}` |
|
||||
| `tolerations` | Toleration labels for pod assignment or the esphome GUI | `[]` |
|
||||
| `affinity` | Affinity settings for pod assignment or the esphome GUI | `{}` |
|
||||
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
|
||||
| `extraVolumes` | Any extra volumes to define for the pod | `{}` |
|
||||
| `extraVolumeMounts` | Any extra volumes mounts to define for each container of the pod | `{}` |
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -37,6 +37,56 @@ spec:
hostNetwork: {{ .Values.hostNetwork }}
dnsPolicy: ClusterFirstWithHostNet
{{- end }}
initContainers:
{{- if .Values.git.enabled }}
- name: git-sync
image: "{{ .Values.git.image.repository }}:{{ .Values.git.image.tag }}"
imagePullPolicy: {{ .Values.git.image.pullPolicy }}
{{- if .Values.git.command }}
command:
{{- range .Values.git.command }}
- {{ . | quote }}
{{- end }}
{{- else }}
command: ["/bin/sh", "-c"]
args:
- set -e;
if [ -d "{{ .Values.git.syncPath }}/.git" ];
then
git -C "{{ .Values.git.syncPath }}" pull || true;
else
if [ "$(ls -A {{ .Values.git.syncPath }})" ];
then
git clone --depth 2 "{{ .Values.git.repo }}" /tmp/repo;
cp -rf /tmp/repo/.git "{{ .Values.git.syncPath }}";
cd "{{ .Values.git.syncPath }}";
git checkout -f;
else
git clone --depth 2 "{{ .Values.git.repo }}" "{{ .Values.git.syncPath }}";
fi;
fi;
if [ -f "{{ .Values.git.keyPath }}/git-crypt-key" ];
then
cd {{ .Values.git.syncPath }};
git-crypt unlock "{{ .Values.git.keyPath }}/git-crypt-key";
fi;
{{- end }}
volumeMounts:
- mountPath: /config
name: config
- mountPath: {{ .Values.git.keyPath }}
name: git-secret
{{- if .Values.extraVolumeMounts }}{{ toYaml .Values.extraVolumeMounts | trim | nindent 8 }}{{ end }}
{{- if .Values.usePodSecurityContext }}
securityContext:
runAsUser: {{ default 0 .Values.runAsUser }}
{{- if and (.Values.runAsUser) (.Values.fsGroup) }}
{{- if not (eq .Values.runAsUser 0.0) }}
fsGroup: {{ .Values.fsGroup }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"

@@ -144,6 +194,13 @@ spec:
type: {{ .type }}
{{- end }}
{{- end }}
{{- if .Values.git.enabled }}
- name: git-secret
secret:
defaultMode: 256
secretName: {{ .Values.git.secret }}
optional: true
{{ end }}
{{- if .Values.extraVolumes }}{{ toYaml .Values.extraVolumes | trim | nindent 6 }}{{ end }}
{{- with .Values.nodeSelector }}
nodeSelector:
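The init container mounts the secret named by `git.secret` (default `git-creds`) at `git.keyPath` and looks for an optional `git-crypt-key` file inside it. A sketch of creating that secret (the local file paths and ssh key filename are assumptions for illustration):

```console
$ kubectl create secret generic git-creds \
    --from-file=id_rsa=$HOME/.ssh/esphome_deploy_key \
    --from-file=git-crypt-key=./git-crypt-key
```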
@@ -4,7 +4,7 @@

image:
repository: esphome/esphome
tag: 1.14.5
tag: 1.15.2
pullPolicy: IfNotPresent
pullSecrets: []

@@ -152,3 +152,25 @@ extraVolumes: []
extraVolumeMounts: []
# - name: example-name
# mountPath: /path/in/container

git:
enabled: false

image:
repository: k8sathome/git-crypt
tag: 2020.09.28
pullPolicy: IfNotPresent

## Specify the command that runs in the git-sync container to pull in configuration.
# command: []

# Committer settings
user:
name: ""
email: ""

repo: ""
secret: git-creds
syncPath: /config
keyPath: /root/.ssh
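Putting the new `git` block to use only requires flipping the flag and pointing it at a repository. A sketch of a values override (the repository URL and committer identity are placeholders):

```yaml
git:
  enabled: true
  repo: "git@github.com:example/esphome-config.git"
  user:
    name: "ESPHome Bot"
    email: "esphome@example.com"
  # secret, syncPath and keyPath keep their defaults: git-creds, /config, /root/.ssh
```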
@@ -1,8 +1,8 @@
apiVersion: v2
appVersion: 2.2.2-ls84
appVersion: 2.2.2
description: An Application dashboard and launcher
name: heimdall
version: 2.0.0
version: 2.0.1
keywords:
- heimdall
home: https://github.com/k8s-at-home/charts/tree/master/charts/heimdall
@@ -4,7 +4,7 @@

image:
repository: linuxserver/heimdall
tag: 2.2.2-ls84
tag: 2.2.2-ls98
pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
@@ -1,8 +1,8 @@
apiVersion: v2
appVersion: 0.114.0
appVersion: 0.115.2
description: Home Assistant
name: home-assistant
version: 2.0.0
version: 2.5.0
keywords:
- home-assistant
- hass

@@ -32,3 +32,7 @@ dependencies:
version: 7.7.1
repository: https://charts.bitnami.com/bitnami
condition: mariadb.enabled
- name: influxdb
version: 0.6.7
repository: https://charts.bitnami.com/bitnami
condition: influxdb.enabled
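The new optional dependency is gated by its condition, so it stays off until a values override enables it. A minimal sketch (only the `influxdb.enabled` condition comes from the Chart.yaml above; any further settings would follow the Bitnami subchart's own values):

```yaml
influxdb:
  enabled: true
```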
@@ -35,158 +35,162 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
|
||||
The following tables lists the configurable parameters of the Home Assistant chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
| ----------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- |
|
||||
| `image.repository` | Image repository | `homeassistant/home-assistant` |
|
||||
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/homeassistant/home-assistant/tags/). | `0.114.0` |
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | Secrets to use when pulling the image | `[]` |
|
||||
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
|
||||
| `probes.liveness.enabled` | Use the livenessProbe? | `true` |
|
||||
| `probes.liveness.scheme` | Specify liveness `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `probes.readiness.enabled` | Use the readinessProbe? | `true` |
|
||||
| `probes.readiness.scheme` | Specify readiness `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `probes.startup.enabled` | Use the startupProbe? (new in kubernetes 1.16) | `false` |
|
||||
| `probes.startup.scheme` | Specify startup `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.startup.failureThreshold` | Specify startup `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.startup.periodSeconds` | Specify startup `periodSeconds` parameter for the deployment | `10` |
|
||||
| `service.type` | Kubernetes service type for the home-assistant GUI | `ClusterIP` |
|
||||
| `service.port` | Kubernetes port where the home-assistant GUI is exposed | `8123` |
|
||||
| `service.portName` | Kubernetes port name where the home-assistant GUI is exposed | `api` |
|
||||
| `service.additionalPorts` | Add additional ports exposed by the home assistant container integrations. Example homematic needs to expose a proxy port | `{}` |
|
||||
| `service.annotations` | Service annotations for the home-assistant GUI | `{}` |
|
||||
| `service.clusterIP` | Cluster IP for the home-assistant GUI | `` |
|
||||
| `service.externalIPs` | External IPs for the home-assistant GUI | `[]` |
|
||||
| `service.loadBalancerIP` | Loadbalancer IP for the home-assistant GUI | `` |
|
||||
| `service.loadBalancerSourceRanges` | Loadbalancer client IP restriction range for the home-assistant GUI | `[]` |
|
||||
| `service.publishNotReadyAddresses` | Set to true if the editors (vscode or configurator) should be reachable when home assistant does not run | `false` |
|
||||
| `service.externalTrafficPolicy` | Loadbalancer externalTrafficPolicy | `` |
|
||||
| `hostNetwork` | Enable hostNetwork - might be needed for discovery to work | `false` |
|
||||
| `service.nodePort` | nodePort to listen on for the home-assistant GUI | `` |
|
||||
| `ingress.enabled` | Enables Ingress | `false` |
|
||||
| `ingress.annotations` | Ingress annotations | `{}` |
|
||||
| `ingress.path` | Ingress path | `/` |
|
||||
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
|
||||
| `ingress.tls` | Ingress TLS configuration | `[]` |
|
||||
| `persistence.enabled` | Use persistent volume to store data | `true` |
|
||||
| `persistence.size` | Size of persistent volume claim | `5Gi` |
|
||||
| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
|
||||
| `persistence.hostPath` | The path to the config directory on the host, instead of a PVC | `nil` |
|
||||
| `persistence.storageClass` | Type of persistent volume claim | `-` |
|
||||
| `persistence.accessMode` | Persistence access modes | `ReadWriteMany` |
|
||||
| `git.enabled` | Use git-sync in init container | `false` |
|
||||
| `git.secret` | Git secret to use for git-sync | `git-creds` |
|
||||
| `git.syncPath` | Git sync path | `/config` |
|
||||
| `git.keyPath` | Git ssh key path | `/root/.ssh` |
|
||||
| `git.user.name` | Human-readable name in the “committer” and “author” fields | `` |
|
||||
| `git.user.email` | Email address for the “committer” and “author” fields | `` |
|
||||
| `zwave.enabled` | Enable zwave host device passthrough. Also enables privileged container mode. | `false` |
|
||||
| `zwave.device` | Device to passthrough to guest | `ttyACM0` |
|
||||
| `hostMounts` | Array of host directories to mount; can be used for devices | [] |
|
||||
| `hostMounts.name` | Name of the volume | `nil` |
|
||||
| `hostMounts.hostPath` | The path on the host machine | `nil` |
|
||||
| `hostMounts.mountPath` | The path at which to mount (optional; assumed same as hostPath) | `nil` |
|
||||
| `hostMounts.type` | The type to mount (optional, i.e., `Directory`) | `nil` |
|
||||
| `extraEnv` | Extra ENV vars to pass to the home-assistant container | `{}` |
|
||||
| `extraEnvSecrets` | Extra env vars to pass to the home-assistant container from k8s secrets - see `values.yaml` for an example | `{}` |
|
||||
| `configurator.enabled` | Enable the optional [configuration UI](https://github.com/danielperna84/hass-configurator) | `false` |
|
||||
| Parameter | Description | Default |
|
||||
|-------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------|
|
||||
| `image.repository` | Image repository | `homeassistant/home-assistant` |
|
||||
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/homeassistant/home-assistant/tags/). | `0.114.0` |
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | Secrets to use when pulling the image | `[]` |
|
||||
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
|
||||
| `probes.liveness.enabled` | Use the livenessProbe? | `true` |
|
||||
| `probes.liveness.scheme` | Specify liveness `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `probes.readiness.enabled` | Use the readinessProbe? | `true` |
|
||||
| `probes.readiness.scheme` | Specify readiness `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `probes.startup.enabled` | Use the startupProbe? (new in kubernetes 1.16) | `false` |
|
||||
| `probes.startup.scheme` | Specify startup `scheme` parameter for the deployment | `HTTP` |
|
||||
| `probes.startup.failureThreshold` | Specify startup `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.startup.periodSeconds` | Specify startup `periodSeconds` parameter for the deployment | `10` |
|
||||
| `service.type` | Kubernetes service type for the home-assistant GUI | `ClusterIP` |
|
||||
| `service.port` | Kubernetes port where the home-assistant GUI is exposed | `8123` |
|
||||
| `service.portName` | Kubernetes port name where the home-assistant GUI is exposed | `api` |
|
||||
| `service.additionalPorts` | Add additional ports exposed by the home assistant container integrations. Example homematic needs to expose a proxy port | `{}` |
|
||||
| `service.annotations` | Service annotations for the home-assistant GUI | `{}` |
|
||||
| `service.clusterIP` | Cluster IP for the home-assistant GUI | `` |
|
||||
| `service.externalIPs` | External IPs for the home-assistant GUI | `[]` |
|
||||
| `service.loadBalancerIP` | Loadbalancer IP for the home-assistant GUI | `` |
|
||||
| `service.loadBalancerSourceRanges` | Loadbalancer client IP restriction range for the home-assistant GUI | `[]` |
|
||||
| `service.publishNotReadyAddresses` | Set to true if the editors (vscode or configurator) should be reachable when home assistant does not run | `false` |
|
||||
| `service.externalTrafficPolicy` | Loadbalancer externalTrafficPolicy | `` |
|
||||
| `hostNetwork` | Enable hostNetwork - might be needed for discovery to work | `false` |
|
||||
| `service.nodePort` | nodePort to listen on for the home-assistant GUI | `` |
|
||||
| `ingress.enabled` | Enables Ingress | `false` |
|
||||
| `ingress.annotations` | Ingress annotations | `{}` |
|
||||
| `ingress.path` | Ingress path | `/` |
|
||||
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
|
||||
| `ingress.tls` | Ingress TLS configuration | `[]` |
|
||||
| `persistence.enabled` | Use persistent volume to store data | `true` |
|
||||
| `persistence.size` | Size of persistent volume claim | `5Gi` |
|
||||
| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
|
||||
| `persistence.hostPath` | The path to the config directory on the host, instead of a PVC | `nil` |
|
||||
| `persistence.storageClass` | Type of persistent volume claim | `-` |
|
||||
| `persistence.accessMode` | Persistence access modes | `ReadWriteMany` |
|
||||
| `persistence.configSubPath` | An optional subPath for the config volumeMount | `` |
|
||||
| `git.enabled` | Use git-sync in init container | `false` |
|
||||
| `git.secret` | Git secret to use for git-sync | `git-creds` |
|
||||
| `git.syncPath` | Git sync path | `/config` |
|
||||
| `git.keyPath` | Git ssh key path | `/root/.ssh` |
|
||||
| `git.user.name` | Human-readable name in the “committer” and “author” fields | `` |
|
||||
| `git.user.email` | Email address for the “committer” and “author” fields | `` |
|
||||
| `zwave.enabled` | Enable zwave host device passthrough. Also enables privileged container mode. | `false` |
|
||||
| `zwave.device` | Device to passthrough to guest | `ttyACM0` |
|
||||
| `hostMounts` | Array of host directories to mount; can be used for devices | `[]` |
|
||||
| `hostMounts.name` | Name of the volume | `nil` |
|
||||
| `hostMounts.hostPath` | The path on the host machine | `nil` |
|
||||
| `hostMounts.mountPath` | The path at which to mount (optional; assumed same as hostPath) | `nil` |
|
||||
| `hostMounts.type` | The type to mount (optional, i.e., `Directory`) | `nil` |
|
||||
| `extraEnv` | Extra ENV vars to pass to the home-assistant container | `{}` |
|
||||
| `extraEnvSecrets` | Extra env vars to pass to the home-assistant container from k8s secrets - see `values.yaml` for an example | `{}` |
|
||||
| `configurator.enabled` | Enable the optional [configuration UI](https://github.com/danielperna84/hass-configurator) | `false` |
|
||||
| `configurator.image.repository` | Image repository | `k8s-at-home/hass-configurator-docker` |
|
||||
| `configurator.image.tag` | Image tag | `0.3.5-x86_64` |
|
||||
| `configurator.image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `configurator.hassApiUrl` | Home Assistant API URL (e.g. 'http://home-assistant:8123/api/') - will auto-configure to proper URL if not set | `` |
|
||||
| `configurator.hassApiPassword` | Home Assistant API Password | `` |
|
||||
| `configurator.basepath` | Base path of the home assistant configuration files | `/config` |
|
||||
| `configurator.enforceBasepath` | If set to true, will prevent navigation to other directories in the configurator UI | `true` |
|
||||
| `configurator.username` | If this and password (below) are set, will require basic auth to access the configurator UI | `` |
|
||||
| `configurator.password` | If this and username (above) are set, will require basic auth to access the configurator UI. password is in the format of a sha256 hash (e.g. "test" would be "{sha256}9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08") | `` |
|
||||
| `configurator.extraEnv` | Extra ENV vars to pass to the configuration UI | `{}` |
|
||||
| `configurator.ingress.enabled` | Enables Ingress for the configurator UI | `false` |
|
||||
| `configurator.ingress.annotations` | Ingress annotations for the configurator UI | `{}` |
|
||||
| `configurator.ingress.hosts` | Ingress accepted hostnames for the configurator UI | `chart-example.local` |
|
||||
| `configurator.ingress.tls` | Ingress TLS configuration for the configurator UI | `[]` |
|
||||
| `configurator.strategy.type` | hass-configurator Deployment Strategy type | `` |
|
||||
| `configurator.tolerations` | Toleration labels for pod assignment for the configurator UI | `[]` |
|
||||
| `configurator.nodeSelector` | Node labels for pod assignment for the configurator UI | `{}` |
|
||||
| `configurator.schedulerName` | Use an alternate scheduler, e.g. "stork" for the configurator UI | `` |
|
||||
| `configurator.podAnnotations` | Key-value pairs to add as pod annotations for the configurator UI | `{}` |
|
||||
| `configurator.resources` | CPU/Memory resource requests/limits for the configurator UI | `{}` |
|
||||
| `configurator.securityContext` | Security context to be added to hass-configurator pods for the configurator UI | `{}` |
|
||||
| `configurator.service.type` | Kubernetes service type for the configurator UI | `ClusterIP` |
|
||||
| `configurator.service.port` | Kubernetes port where the configurator UI is exposed | `3218` |
|
||||
| `configurator.service.nodePort` | nodePort to listen on for the configurator UI | `` |
|
||||
| `configurator.service.annotations` | Service annotations for the configurator UI | `{}` |
|
||||
| `configurator.service.labels` | Service labels to use for the configurator UI | `{}` |
|
||||
| `configurator.service.clusterIP` | Cluster IP for the configurator UI | `` |
|
||||
| `configurator.service.externalIPs` | External IPs for the configurator UI | `[]` |
|
||||
| `configurator.service.loadBalancerIP` | Loadbalancer IP for the configurator UI | `` |
|
||||
| `configurator.service.loadBalancerSourceRanges` | Loadbalancer client IP restriction range for the configurator UI | `[]` |
|
||||
| `vscode.enabled` | Enable the optional [VS Code Server Sidecar](https://github.com/cdr/code-server) | `false` |
|
||||
| `vscode.image.repository` | Image repository | `codercom/code-server` |
|
||||
| `vscode.image.tag` | Image tag | `3.4.1` |
|
||||
| `vscode.image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `vscode.hassConfig` | Base path of the home assistant configuration files | `/config` |
|
||||
| `vscode.vscodePath` | Base path of the VS Code configuration files | `/config/.vscode` |
|
||||
| `vscode.password` | If this is set, will require a password to access the VS Code Server UI | `` |
|
||||
| `vscode.extraEnv` | Extra ENV vars to pass to the VS Code container | `{}` |
|
||||
| `vscode.ingress.enabled` | Enables Ingress for the VS Code UI | `false` |
|
||||
| `vscode.ingress.annotations` | Ingress annotations for the VS Code UI | `{}` |
|
||||
| `vscode.ingress.hosts` | Ingress accepted hostnames for the VS Code UI | `chart-example.local` |
|
||||
| `vscode.ingress.tls` | Ingress TLS configuration for the VS Code UI | `[]` |
|
||||
| `vscode.resources` | CPU/Memory resource requests/limits for the VS Code UI | `{}` |
|
||||
| `vscode.securityContext` | Security context to be added to hass-vscode pods for the VS Code UI | `{}` |
|
||||
| `vscode.service.type` | Kubernetes service type for the VS Code UI | `ClusterIP` |
|
||||
| `vscode.service.port` | Kubernetes port where the vscode UI is exposed | `80` |
|
||||
| `vscode.service.nodePort` | nodePort to listen on for the VS Code UI | `` |
|
||||
| `vscode.service.annotations` | Service annotations for the VS Code UI | `{}` |
|
||||
| `vscode.service.labels` | Service labels to use for the VS Code UI | `{}` |
|
||||
| `vscode.service.clusterIP` | Cluster IP for the VS Code UI | `` |
|
||||
| `vscode.service.externalIPs` | External IPs for the VS Code UI | `[]` |
|
||||
| `vscode.service.loadBalancerIP` | Loadbalancer IP for the VS Code UI | `` |
|
||||
| `vscode.service.loadBalancerSourceRanges` | Loadbalancer client IP restriction range for the VS Code UI | `[]` |
|
||||
| `appdaemon.enabled` | Enable the optional [Appdaemon Sidecar](https://appdaemon.readthedocs.io/en/latest/) | `false` |
|
||||
| `appdaemon.image.repository` | Image repository | `acockburn/appdaemon` |
|
||||
| `appdaemon.image.tag` | Image tag | `3.0.5` |
|
||||
| `appdaemon.image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `appdaemon.haToken` | Home Assistant API token - you need to generate it in your Home Assistant profile and then copy here | `` |
|
||||
| `appdaemon.extraEnv` | Extra ENV vars to pass to the AppDaemon container | `{}` |
|
||||
| `appdaemon.ingress.enabled` | Enables Ingress for the AppDaemon UI | `false` |
|
||||
| `appdaemon.ingress.annotations` | Ingress annotations for the AppDaemon UI | `{}` |
|
||||
| `appdaemon.ingress.hosts` | Ingress accepted hostnames for the AppDaemon UI | `appdaemon.local` |
|
||||
| `appdaemon.ingress.tls` | Ingress TLS configuration for the AppDaemon UI | `[]` |
|
||||
| `appdaemon.resources` | CPU/Memory resource requests/limits for the AppDaemon | `{}` |
|
||||
| `appdaemon.securityContext` | Security context to be added to hass-appdaemon container | `{}` |
|
||||
| `appdaemon.service.type` | Kubernetes service type for the AppDaemon UI | `ClusterIP` |
|
||||
| `appdaemon.service.port` | Kubernetes port where the AppDaemon UI is exposed | `5050` |
|
||||
| `appdaemon.service.nodePort` | nodePort to listen on for the AppDaemon UI | `` |
|
||||
| `appdaemon.service.annotations` | Service annotations for the AppDaemon UI | `{}` |
|
||||
| `appdaemon.service.labels` | Service labels to use for the AppDaemon UI | `{}` |
|
||||
| `appdaemon.service.clusterIP` | Cluster IP for the AppDaemon UI | `` |
|
||||
| `appdaemon.service.externalIPs` | External IPs for the AppDaemon UI | `[]` |
|
||||
| `appdaemon.service.loadBalancerIP` | Loadbalancer IP for the AppDaemon UI | `` |
|
||||
| `appdaemon.service.loadBalancerSourceRanges` | Loadbalancer client IP restriction range for the AppDaemon UI | `[]` |
|
||||
| `esphome.enabled` | Enable the optional [ESPHome](https://esphome.io) deployment | `false` |
|
||||
| `mariadb.enabled` | Enable the optional [Mariadb](https://github.com/bitnami/charts) deployment | `false` |
|
||||
| `postgresql.enabled` | Enable the optional [Postgres](https://github.com/bitnami/charts) deployment | `false` |
|
||||
| `resources` | CPU/Memory resource requests/limits for the home-assistant GUI | `{}` |
|
||||
| `nodeSelector` | Node labels for pod assignment for the home-assistant GUI | `{}` |
|
||||
| `tolerations` | Toleration labels for pod assignment for the home-assistant GUI | `[]` |
|
||||
| `affinity` | Affinity settings for pod assignment for the home-assistant GUI | `{}` |
|
||||
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
|
||||
| `extraVolumes` | Any extra volumes to define for the pod | `{}` |
|
||||
| `extraVolumeMounts` | Any extra volumes mounts to define for each container of the pod | `{}` |
|
||||
| `monitoring.enabled` | Enables Monitoring support | `false` |
|
||||
| `monitoring.serviceMonitor.enabled` | Setup a ServiceMonitor to configure scraping | `false` |
|
||||
| `monitoring.serviceMonitor.namespace` | Set the namespace the ServiceMonitor should be deployed | `false` |
|
||||
| `monitoring.serviceMonitor.interval` | Set how frequently Prometheus should scrape | `30` |
|
||||
| `monitoring.serviceMonitor.labels` | Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator | `{}` |
|
||||
| `monitoring.serviceMonitor.bearerTokenFile` | Set bearerTokenFile for home-assistant auth (use long lived access tokens) | `nil` |
|
||||
| `monitoring.serviceMonitor.bearerTokenSecret` | Set bearerTokenSecret for home-assistant auth (use long lived access tokens) | `nil` |
|
||||
| `configurator.image.tag` | Image tag | `0.3.5-x86_64` |
|
||||
| `configurator.image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `configurator.hassApiUrl` | Home Assistant API URL (e.g. 'http://home-assistant:8123/api/') - will auto-configure to proper URL if not set | `` |
|
||||
| `configurator.hassApiPassword` | Home Assistant API Password | `` |
|
||||
| `configurator.basepath` | Base path of the home assistant configuration files | `/config` |
|
||||
| `configurator.enforceBasepath` | If set to true, will prevent navigation to other directories in the configurator UI | `true` |
|
||||
| `configurator.username` | If this and password (below) are set, will require basic auth to access the configurator UI | `` |
|
||||
| `configurator.password` | If this and username (above) are set, will require basic auth to access the configurator UI. The password is a sha256 hash prefixed with "{sha256}" (e.g. "test" would be "{sha256}9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"); see the example below this table | `` |
|
||||
| `configurator.extraEnv` | Extra ENV vars to pass to the configuration UI | `{}` |
|
||||
| `configurator.ingress.enabled` | Enables Ingress for the configurator UI | `false` |
|
||||
| `configurator.ingress.annotations` | Ingress annotations for the configurator UI | `{}` |
|
||||
| `configurator.ingress.hosts` | Ingress accepted hostnames for the configurator UI | `chart-example.local` |
|
||||
| `configurator.ingress.tls` | Ingress TLS configuration for the configurator UI | `[]` |
|
||||
| `configurator.strategy.type` | hass-configurator Deployment Strategy type | `` |
|
||||
| `configurator.tolerations` | Toleration labels for pod assignment for the configurator UI | `[]` |
|
||||
| `configurator.nodeSelector` | Node labels for pod assignment for the configurator UI | `{}` |
|
||||
| `configurator.schedulerName` | Use an alternate scheduler, e.g. "stork" for the configurator UI | `` |
|
||||
| `configurator.podAnnotations` | Key-value pairs to add as pod annotations for the configurator UI | `{}` |
|
||||
| `configurator.resources` | CPU/Memory resource requests/limits for the configurator UI | `{}` |
|
||||
| `configurator.securityContext` | Security context to be added to hass-configurator pods for the configurator UI | `{}` |
|
||||
| `configurator.service.type` | Kubernetes service type for the configurator UI | `ClusterIP` |
|
||||
| `configurator.service.port` | Kubernetes port where the configurator UI is exposed | `3218` |
|
||||
| `configurator.service.nodePort` | nodePort to listen on for the configurator UI | `` |
|
||||
| `configurator.service.annotations` | Service annotations for the configurator UI | `{}` |
|
||||
| `configurator.service.labels` | Service labels to use for the configurator UI | `{}` |
|
||||
| `configurator.service.clusterIP` | Cluster IP for the configurator UI | `` |
|
||||
| `configurator.service.externalIPs` | External IPs for the configurator UI | `[]` |
|
||||
| `configurator.service.loadBalancerIP` | Loadbalancer IP for the configurator UI | `` |
|
||||
| `configurator.service.loadBalancerSourceRanges` | Loadbalancer client IP restriction range for the configurator UI | `[]` |
|
||||
| `vscode.enabled` | Enable the optional [VS Code Server Sidecar](https://github.com/cdr/code-server) | `false` |
|
||||
| `vscode.image.repository` | Image repository | `codercom/code-server` |
|
||||
| `vscode.image.tag` | Image tag | `3.4.1` |
|
||||
| `vscode.image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `vscode.hassConfig` | Base path of the home assistant configuration files | `/config` |
|
||||
| `vscode.vscodePath` | Base path of the VS Code configuration files | `/config/.vscode` |
|
||||
| `vscode.password` | If this is set, will require a password to access the VS Code Server UI | `` |
|
||||
| `vscode.extraEnv` | Extra ENV vars to pass to the VS Code container | `{}` |
|
||||
| `vscode.args` | Optional arguments to pass into vscode image. Defaulting to "-" uses default arguments. | `-` |
|
||||
| `vscode.ingress.enabled` | Enables Ingress for the VS Code UI | `false` |
|
||||
| `vscode.ingress.annotations` | Ingress annotations for the VS Code UI | `{}` |
|
||||
| `vscode.ingress.hosts` | Ingress accepted hostnames for the VS Code UI | `chart-example.local` |
|
||||
| `vscode.ingress.tls` | Ingress TLS configuration for the VS Code UI | `[]` |
|
||||
| `vscode.resources` | CPU/Memory resource requests/limits for the VS Code UI | `{}` |
|
||||
| `vscode.securityContext` | Security context to be added to hass-vscode pods for the VS Code UI | `{}` |
|
||||
| `vscode.service.type` | Kubernetes service type for the VS Code UI | `ClusterIP` |
|
||||
| `vscode.service.port` | Kubernetes port where the vscode UI is exposed | `80` |
|
||||
| `vscode.service.nodePort` | nodePort to listen on for the VS Code UI | `` |
|
||||
| `vscode.service.annotations` | Service annotations for the VS Code UI | `{}` |
|
||||
| `vscode.service.labels` | Service labels to use for the VS Code UI | `{}` |
|
||||
| `vscode.service.clusterIP` | Cluster IP for the VS Code UI | `` |
|
||||
| `vscode.service.externalIPs` | External IPs for the VS Code UI | `[]` |
|
||||
| `vscode.service.loadBalancerIP` | Loadbalancer IP for the VS Code UI | `` |
|
||||
| `vscode.service.loadBalancerSourceRanges` | Loadbalancer client IP restriction range for the VS Code UI | `[]` |
|
||||
| `appdaemon.enabled` | Enable the optional [Appdaemon Sidecar](https://appdaemon.readthedocs.io/en/latest/) | `false` |
|
||||
| `appdaemon.image.repository` | Image repository | `acockburn/appdaemon` |
|
||||
| `appdaemon.image.tag` | Image tag | `3.0.5` |
|
||||
| `appdaemon.image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `appdaemon.haToken` | Home Assistant API token - you need to generate it in your Home Assistant profile and then copy here | `` |
|
||||
| `appdaemon.extraEnv` | Extra ENV vars to pass to the AppDaemon container | `{}` |
|
||||
| `appdaemon.configSubPath` | An optional subPath for the AppDaemon container's config volume mount | `appdaemon` |
|
||||
| `appdaemon.ingress.enabled` | Enables Ingress for the AppDaemon UI | `false` |
|
||||
| `appdaemon.ingress.annotations` | Ingress annotations for the AppDaemon UI | `{}` |
|
||||
| `appdaemon.ingress.hosts` | Ingress accepted hostnames for the AppDaemon UI | `appdaemon.local` |
|
||||
| `appdaemon.ingress.tls` | Ingress TLS configuration for the AppDaemon UI | `[]` |
|
||||
| `appdaemon.resources` | CPU/Memory resource requests/limits for the AppDaemon | `{}` |
|
||||
| `appdaemon.securityContext` | Security context to be added to hass-appdaemon container | `{}` |
|
||||
| `appdaemon.service.type` | Kubernetes service type for the AppDaemon UI | `ClusterIP` |
|
||||
| `appdaemon.service.port` | Kubernetes port where the AppDaemon UI is exposed | `5050` |
|
||||
| `appdaemon.service.nodePort` | nodePort to listen on for the AppDaemon UI | `` |
|
||||
| `appdaemon.service.annotations` | Service annotations for the AppDaemon UI | `{}` |
|
||||
| `appdaemon.service.labels` | Service labels to use for the AppDaemon UI | `{}` |
|
||||
| `appdaemon.service.clusterIP` | Cluster IP for the AppDaemon UI | `` |
|
||||
| `appdaemon.service.externalIPs` | External IPs for the AppDaemon UI | `[]` |
|
||||
| `appdaemon.service.loadBalancerIP` | Loadbalancer IP for the AppDaemon UI | `` |
|
||||
| `appdaemon.service.loadBalancerSourceRanges` | Loadbalancer client IP restriction range for the AppDaemon UI | `[]` |
|
||||
| `esphome.enabled` | Enable the optional [ESPHome](https://esphome.io) deployment | `false` |
|
||||
| `mariadb.enabled` | Enable the optional [Mariadb](https://github.com/bitnami/charts) deployment | `false` |
|
||||
| `postgresql.enabled` | Enable the optional [Postgres](https://github.com/bitnami/charts) deployment | `false` |
|
||||
| `influxdb.enabled` | Enable the optional [Influxdb](https://github.com/bitnami/charts) deployment | `false` |
|
||||
| `resources` | CPU/Memory resource requests/limits for the home-assistant GUI | `{}` |
|
||||
| `nodeSelector` | Node labels for pod assignment for the home-assistant GUI | `{}` |
|
||||
| `tolerations` | Toleration labels for pod assignment for the home-assistant GUI | `[]` |
|
||||
| `affinity` | Affinity settings for pod assignment for the home-assistant GUI | `{}` |
|
||||
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
|
||||
| `extraVolumes` | Any extra volumes to define for the pod | `{}` |
|
||||
| `extraVolumeMounts` | Any extra volumes mounts to define for each container of the pod | `{}` |
|
||||
| `monitoring.enabled` | Enables Monitoring support | `false` |
|
||||
| `monitoring.serviceMonitor.enabled` | Setup a ServiceMonitor to configure scraping | `false` |
|
||||
| `monitoring.serviceMonitor.namespace` | Set the namespace the ServiceMonitor should be deployed | `false` |
|
||||
| `monitoring.serviceMonitor.interval` | Set how frequently Prometheus should scrape | `30` |
|
||||
| `monitoring.serviceMonitor.labels` | Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator | `{}` |
|
||||
| `monitoring.serviceMonitor.bearerTokenFile` | Set bearerTokenFile for home-assistant auth (use long lived access tokens) | `nil` |
|
||||
| `monitoring.serviceMonitor.bearerTokenSecret` | Set bearerTokenSecret for home-assistant auth (use long lived access tokens) | `nil` |
|
||||
|
||||
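The value for `configurator.password` can be generated from a plaintext password. A minimal sketch, assuming GNU coreutils (`sha256sum`) and `awk` are available:

```shell
# produce the "{sha256}<hex digest>" string expected by configurator.password
echo -n "test" | sha256sum | awk '{ print "{sha256}" $1 }'
```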
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
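A minimal sketch of what that could look like (the release name is illustrative and the chart is assumed to be published as `k8s-at-home/home-assistant`):

```shell
helm install --name my-release \
  --set vscode.enabled=true \
  k8s-at-home/home-assistant
```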
|
||||
|
||||
@@ -222,7 +226,18 @@ Much of the home assistant configuration occurs inside the various files persist
|
||||
|
||||
## Git sync secret
|
||||
|
||||
In order to sync the home assistant from a git repo, you have to store a ssh key as a kubernetes git secret
|
||||
In order to sync the home assistant from a git repo, you can optionally store an ssh key as a kubernetes git secret:
|
||||
```shell
|
||||
kubectl create secret generic git-creds --from-file=id_rsa=git/k8s_id_rsa --from-file=known_hosts=git/known_hosts --from-file=id_rsa.pub=git/k8s_id_rsa.pub
|
||||
```
|
||||
|
||||
## git-crypt support
|
||||
|
||||
When using Git sync it is possible to specify a file called `git-crypt-key` in the secret referred to in `git.secret`. When this file is present, `git-crypt unlock` will automatically be executed after the repo has been synced.
|
||||
|
||||
**Note:** `git-crypt` is not installed by default in the other images! If you wish to push changes from the VS Code or Configurator containers, you will have to make sure that it is installed.
|
||||
|
||||
The value for this secret can be obtained by running the following command in an unlocked version of your Home Assistant settings repo. It will export the unlock key, base64 encode it and copy it to your clipboard.
|
||||
```shell
|
||||
git-crypt export-key ./tmp-key && cat ./tmp-key | base64 | pbcopy && rm ./tmp-key
|
||||
```
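One way to make the key available to the chart is to include it in the secret referred to in `git.secret` alongside the ssh key material. A sketch, assuming the exported key was written to `./tmp-key` and the secret uses the default name `git-creds`:

```shell
kubectl create secret generic git-creds \
  --from-file=id_rsa=git/k8s_id_rsa \
  --from-file=id_rsa.pub=git/k8s_id_rsa.pub \
  --from-file=known_hosts=git/known_hosts \
  --from-file=git-crypt-key=./tmp-key
```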
|
||||
|
||||
@@ -30,3 +30,24 @@ Create chart name and version as used by the chart label.
|
||||
{{- define "home-assistant.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create argument list for vscode image.
|
||||
*/}}
|
||||
{{- define "home-assistant.vscode.args" -}}
|
||||
{{- if empty .Values.vscode.args -}}
|
||||
{{- "" -}}
|
||||
{{- else if (eq (typeOf .Values.vscode.args) "string") -}}
|
||||
- --port={{ .Values.vscode.service.port }}
|
||||
{{- if not (.Values.vscode.password) }}
|
||||
- --auth=none
|
||||
{{- end }}
|
||||
{{- if .Values.vscode.vscodePath }}
|
||||
- --extensions-dir={{ .Values.vscode.vscodePath }}
|
||||
- --user-data-dir={{ .Values.vscode.vscodePath }}
|
||||
- {{ .Values.vscode.hassConfig }}
|
||||
{{- end }}
|
||||
{{- else -}}
|
||||
{{ toYaml .Values.vscode.args }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
@@ -1,3 +1,4 @@
|
||||
{{- $args := include "home-assistant.vscode.args" . -}}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
@@ -48,11 +49,33 @@ spec:
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
command: ['sh', '-c', '[ "$(ls {{ .Values.git.syncPath }})" ] || git clone {{ .Values.git.repo }} {{ .Values.git.syncPath }}']
|
||||
command: ["/bin/sh", "-c"]
|
||||
args:
|
||||
- set -e;
|
||||
if [ -d "{{ .Values.git.syncPath }}/.git" ];
|
||||
then
|
||||
git -C "{{ .Values.git.syncPath }}" pull || true;
|
||||
else
|
||||
if [ "$(ls -A {{ .Values.git.syncPath }})" ];
|
||||
then
|
||||
git clone --depth 2 "{{ .Values.git.repo }}" /tmp/repo;
|
||||
cp -rf /tmp/repo/.git "{{ .Values.git.syncPath }}";
|
||||
cd "{{ .Values.git.syncPath }}";
|
||||
git checkout -f;
|
||||
else
|
||||
git clone --depth 2 "{{ .Values.git.repo }}" "{{ .Values.git.syncPath }}";
|
||||
fi;
|
||||
fi;
|
||||
if [ -f "{{ .Values.git.keyPath }}/git-crypt-key" ];
|
||||
then
|
||||
cd {{ .Values.git.syncPath }};
|
||||
git-crypt unlock "{{ .Values.git.keyPath }}/git-crypt-key";
|
||||
fi;
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /config
|
||||
name: config
|
||||
subPath: {{ default "" .Values.persistence.configSubPath }}
|
||||
- mountPath: {{ .Values.git.keyPath }}
|
||||
name: git-secret
|
||||
{{- if .Values.extraVolumeMounts }}{{ toYaml .Values.extraVolumeMounts | trim | nindent 8 }}{{ end }}
|
||||
@@ -127,6 +150,7 @@ spec:
|
||||
volumeMounts:
|
||||
- mountPath: /config
|
||||
name: config
|
||||
subPath: {{ default "" .Values.persistence.configSubPath }}
|
||||
{{- if .Values.zwave.enabled }}
|
||||
- mountPath: /dev/ttyACM0
|
||||
name: ttyacm
|
||||
@@ -226,6 +250,7 @@ spec:
|
||||
volumeMounts:
|
||||
- mountPath: /config
|
||||
name: config
|
||||
subPath: {{ default "" .Values.persistence.configSubPath }}
|
||||
{{- if .Values.git.enabled }}
|
||||
- mountPath: {{ .Values.git.keyPath }}
|
||||
name: git-secret
|
||||
@@ -249,15 +274,9 @@ spec:
|
||||
imagePullPolicy: {{ .Values.vscode.image.pullPolicy }}
|
||||
workingDir: {{ .Values.vscode.hassConfig }}
|
||||
args:
|
||||
- --port={{ .Values.vscode.service.port }}
|
||||
{{- if not (.Values.vscode.password) }}
|
||||
- --auth=none
|
||||
{{- end }}
|
||||
{{- if .Values.vscode.vscodePath }}
|
||||
- --extensions-dir={{ .Values.vscode.vscodePath }}
|
||||
- --user-data-dir={{ .Values.vscode.vscodePath }}
|
||||
- {{ .Values.vscode.hassConfig }}
|
||||
{{- end }}
|
||||
{{- with $args }}
|
||||
{{ . | indent 12 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: vscode
|
||||
containerPort: {{ .Values.vscode.service.port }}
|
||||
@@ -289,6 +308,7 @@ spec:
|
||||
volumeMounts:
|
||||
- mountPath: /config
|
||||
name: config
|
||||
subPath: {{ default "" .Values.persistence.configSubPath }}
|
||||
{{- if .Values.git.enabled }}
|
||||
- mountPath: {{ .Values.git.keyPath }}
|
||||
name: git-secret
|
||||
@@ -348,8 +368,9 @@ spec:
|
||||
volumeMounts:
|
||||
- mountPath: /ha-conf
|
||||
name: config
|
||||
subPath: {{ default "" .Values.persistence.configSubPath }}
|
||||
- mountPath: /conf
|
||||
subPath: appdaemon
|
||||
subPath: {{ default "appdaemon" .Values.appdaemon.configSubPath }}
|
||||
name: config
|
||||
{{- if .Values.extraVolumeMounts }}{{ toYaml .Values.extraVolumeMounts | trim | nindent 10 }}{{ end }}
|
||||
{{- if .Values.usePodSecurityContext }}
|
||||
@@ -396,6 +417,7 @@ spec:
|
||||
secret:
|
||||
defaultMode: 256
|
||||
secretName: {{ .Values.git.secret }}
|
||||
optional: true
|
||||
{{ end }}
|
||||
{{- if .Values.extraVolumes }}{{ toYaml .Values.extraVolumes | trim | nindent 6 }}{{ end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
image:
|
||||
repository: homeassistant/home-assistant
|
||||
tag: 0.114.0
|
||||
tag: 0.115.2
|
||||
pullPolicy: IfNotPresent
|
||||
pullSecrets: []
|
||||
|
||||
@@ -84,6 +84,9 @@ persistence:
|
||||
# hostPath: /path/to/the/config/folder
|
||||
accessMode: ReadWriteOnce
|
||||
size: 5Gi
|
||||
# If you use an existingClaim it is sometimes useful to specify a subPath within the volume to mount instead of mounting the root.
|
||||
# This allows several charts to share a common volume. For example for configurations.
|
||||
# configSubPath: configs/hass
|
||||
|
||||
## Additional hass container environment variable
|
||||
## For instance to add a http_proxy
|
||||
@@ -118,12 +121,9 @@ usePodSecurityContext: true
|
||||
git:
|
||||
enabled: false
|
||||
|
||||
## we just use the hass-configurator container image
|
||||
## you can use any image which has git and openssh installed
|
||||
##
|
||||
image:
|
||||
repository: causticlab/hass-configurator-docker
|
||||
tag: 0.3.5-x86_64
|
||||
repository: k8sathome/git-crypt
|
||||
tag: 2020.09.28
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
## Specify the command that runs in the git-sync container to pull in configuration.
|
||||
@@ -134,7 +134,7 @@ git:
|
||||
name: ""
|
||||
email: ""
|
||||
|
||||
# repo:
|
||||
repo: ""
|
||||
secret: git-creds
|
||||
syncPath: /config
|
||||
keyPath: /root/.ssh
|
||||
@@ -250,6 +250,10 @@ vscode:
|
||||
##
|
||||
extraEnv: {}
|
||||
|
||||
## Set to "-" to use default argument list
|
||||
## Otherwise convert to list of arguments
|
||||
args: "-"
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
@@ -295,6 +299,11 @@ appdaemon:
|
||||
##
|
||||
extraEnv: {}
|
||||
|
||||
# If you use an existingClaim for the config volume then it is sometimes useful to specify a subPath
|
||||
# within the volume to mount instead of mounting the root.
|
||||
# This allows several charts to share a common volume. For example for configurations.
|
||||
# configSubPath: configs/hass/appdaemon
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
@@ -334,6 +343,8 @@ esphome:
|
||||
mountPath: /config/secrets.yaml
|
||||
subPath: secrets.yaml
|
||||
|
||||
# Enabled mariadb
|
||||
# ... for more options see https://github.com/bitnami/charts/tree/master/bitnami/mariadb
|
||||
mariadb:
|
||||
enabled: false
|
||||
db:
|
||||
@@ -348,6 +359,9 @@ mariadb:
|
||||
persistence:
|
||||
enabled: false
|
||||
# storageClass: ""
|
||||
|
||||
# Enabled postgres
|
||||
# ... for more options see https://github.com/bitnami/charts/tree/master/bitnami/postgresql
|
||||
postgresql:
|
||||
enabled: false
|
||||
global:
|
||||
@@ -359,6 +373,19 @@ postgresql:
|
||||
enabled: false
|
||||
# storageClass: ""
|
||||
|
||||
# Enable influxdb
|
||||
# ... for more options see https://github.com/bitnami/charts/tree/master/bitnami/influxdb
|
||||
influxdb:
|
||||
enabled: false
|
||||
architecture: standalone
|
||||
database: home_assistant
|
||||
authEnabled: false
|
||||
persistence:
|
||||
enabled: false
|
||||
# storageClass: ""
|
||||
# size: 8Gi
|
||||
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
|
||||
@@ -14,10 +14,10 @@
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
# OWNERS file for Kubernetes
|
||||
OWNERS
|
||||
.vscode/
|
||||
17
charts/homebridge/Chart.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
apiVersion: v2
|
||||
appVersion: 3.1.0
|
||||
version: 1.0.1
|
||||
name: homebridge
|
||||
description: A lightweight NodeJS server that emulates the iOS HomeKit API
|
||||
type: application
|
||||
keywords:
|
||||
- homebridge
|
||||
- homekit
|
||||
home: https://github.com/k8s-at-home/charts/tree/master/charts/homebridge
|
||||
icon: https://avatars0.githubusercontent.com/u/38217527?s=400&v=4?sanitize=true
|
||||
sources:
|
||||
- https://homebridge.io/
|
||||
- https://github.com/oznu/docker-homebridge
|
||||
maintainers:
|
||||
- name: bjw-s
|
||||
email: bjw-s@users.noreply.github.com
|
||||
100
charts/homebridge/README.md
Normal file
@@ -0,0 +1,100 @@
|
||||
# Homebridge
|
||||
|
||||
This is a helm chart for [Homebridge](https://homebridge.io) based on [Docker Homebridge](https://github.com/oznu/docker-homebridge).
|
||||
|
||||
## TL;DR;
|
||||
|
||||
```shell
|
||||
helm repo add k8s-at-home https://k8s-at-home.com/charts/
|
||||
helm install k8s-at-home/homebridge
|
||||
```
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```shell
|
||||
helm install --name my-release k8s-at-home/homebridge
|
||||
```
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-release` deployment:
|
||||
|
||||
```shell
|
||||
helm delete my-release --purge
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following table lists the configurable parameters of the Homebridge chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
| ----------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- |
|
||||
| `image.repository` | Image repository | `oznu/homebridge` |
|
||||
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/oznu/homebridge/tags). | `3.1.0` |
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | Secrets to use when pulling the image | `[]` |
|
||||
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
|
||||
| `timezone` | Specify the container timezone | `UTC` |
|
||||
| `puid` | process userID the instance should run as | `1000` |
|
||||
| `pgid` | process groupID the instance should run as | `1000` |
|
||||
| `config.enableUI` | Enable the Homebridge UI plugin | `true` |
|
||||
| `config.plugins` | Additional Homebridge plugins to install at container startup | `[]` |
|
||||
| `config.additionalPackages` | Additional Alpine packages to install at container startup | `[]` |
|
||||
| `probes.liveness.enabled` | Use the livenessProbe? | `true` |
|
||||
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `probes.readiness.enabled` | Use the readinessProbe? | `true` |
|
||||
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `probes.startup.enabled` | Use the startupProbe? (new in kubernetes 1.16) | `false` |
|
||||
| `probes.startup.failureThreshold` | Specify startup `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.startup.periodSeconds` | Specify startup `periodSeconds` parameter for the deployment | `10` |
|
||||
| `service.type` | Kubernetes service type for the homebridge GUI | `ClusterIP` |
|
||||
| `service.httpPort` | Kubernetes port where the homebridge GUI is exposed | `8080` |
|
||||
| `service.annotations` | Service annotations for the homebridge GUI | `{}` |
|
||||
| `service.clusterIP` | Cluster IP for the homebridge GUI | `` |
|
||||
| `service.externalIPs` | External IPs for the homebridge GUI | `[]` |
|
||||
| `service.loadBalancerIP` | Loadbalancer IP for the homebridge GUI | `` |
|
||||
| `service.loadBalancerSourceRanges` | Loadbalancer client IP restriction range for the homebridge GUI | `[]` |
|
||||
| `service.externalTrafficPolicy` | Loadbalancer externalTrafficPolicy | `` |
|
||||
| `hostNetwork` | Enable hostNetwork - needed for discovery to work | `false` |
|
||||
| `service.nodePort` | nodePort to listen on for the homebridge GUI | `` |
|
||||
| `ingress.enabled` | Enables Ingress | `false` |
|
||||
| `ingress.annotations` | Ingress annotations | `{}` |
|
||||
| `ingress.path` | Ingress path | `/` |
|
||||
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
|
||||
| `ingress.tls` | Ingress TLS configuration | `[]` |
|
||||
| `persistence.enabled` | Use persistent volume to store data | `true` |
|
||||
| `persistence.size` | Size of persistent volume claim | `1Gi` |
|
||||
| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
|
||||
| `persistence.storageClass` | Type of persistent volume claim | `-` |
|
||||
| `persistence.accessMode` | Persistence access modes | `ReadWriteMany` |
|
||||
| `persistence.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
|
||||
| `extraEnvs` | Extra ENV vars to pass to the homebridge container | `[]` |
|
||||
| `resources` | CPU/Memory resource requests/limits for the homebridge GUI | `{}` |
|
||||
| `nodeSelector` | Node labels for pod assignment for the homebridge GUI | `{}` |
|
||||
| `tolerations` | Toleration labels for pod assignment for the homebridge GUI | `[]` |
|
||||
| `affinity` | Affinity settings for pod assignment for the homebridge GUI | `{}` |
|
||||
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
||||
|
||||
```shell
|
||||
helm install --name my-release \
|
||||
--set timezone="UTC" \
|
||||
k8s-at-home/homebridge
|
||||
```
|
||||
|
||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
||||
|
||||
```shell
|
||||
helm install --name my-release -f values.yaml k8s-at-home/homebridge
|
||||
```
|
||||
|
||||
Read through the [values.yaml](values.yaml) file. It has several commented out suggested values.
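For instance, a small custom `values.yaml` could look like the sketch below (the plugin and package names are illustrative, taken from the commented suggestions in the default values):

```yaml
timezone: "UTC"
config:
  enableUI: true
  plugins:
    - homebridge-hue
  additionalPackages:
    - bash
```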
|
||||
22
charts/homebridge/templates/NOTES.txt
Normal file
@@ -0,0 +1,22 @@
|
||||
{{- if .Values.config.enableUI }}
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- $ingressPath := .Values.ingress.path -}}
|
||||
{{- range $host := .Values.ingress.hosts }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ $ingressPath }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "homebridge.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "homebridge.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "homebridge.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
|
||||
echo http://$SERVICE_IP:{{ .Values.service.httpPort }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "homebridge.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:{{ .Values.service.httpPort }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
62
charts/homebridge/templates/_helpers.tpl
Normal file
@@ -0,0 +1,62 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "homebridge.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "homebridge.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "homebridge.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "homebridge.labels" -}}
|
||||
helm.sh/chart: {{ include "homebridge.chart" . }}
|
||||
{{ include "homebridge.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "homebridge.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "homebridge.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "homebridge.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "homebridge.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
21
charts/homebridge/templates/configmap.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "homebridge.fullname" . }}
|
||||
labels:
|
||||
{{ include "homebridge.labels" . | indent 4 }}
|
||||
data:
|
||||
startup.sh: |
|
||||
#!/bin/sh
|
||||
|
||||
# Docker Homebridge startup.sh overridden by Helm Chart
|
||||
|
||||
# Plugins
|
||||
{{- range .Values.config.plugins }}
|
||||
npm install {{ . }}
|
||||
{{- end }}
|
||||
|
||||
# Packages
|
||||
{{- range .Values.config.additionalPackages }}
|
||||
apk add --no-cache {{ . }}
|
||||
{{- end }}
|
||||
134
charts/homebridge/templates/deployment.yaml
Normal file
@@ -0,0 +1,134 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "homebridge.fullname" . }}
|
||||
labels:
|
||||
{{- include "homebridge.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "homebridge.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "homebridge.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "homebridge.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
{{- if .Values.hostNetwork }}
|
||||
hostNetwork: {{ .Values.hostNetwork }}
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
{{- if .Values.timezone }}
|
||||
- name: TZ
|
||||
value: {{ .Values.timezone | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.puid }}
|
||||
- name: PUID
|
||||
value: "{{ .Values.puid }}"
|
||||
{{- end }}
|
||||
{{- if .Values.pgid }}
|
||||
- name: PGID
|
||||
value: "{{ .Values.pgid }}"
|
||||
{{- end }}
|
||||
{{- if .Values.config.enableUI }}
|
||||
- name: HOMEBRIDGE_CONFIG_UI
|
||||
value: "1"
|
||||
- name: HOMEBRIDGE_CONFIG_UI_PORT
|
||||
value: {{ .Values.service.httpPort | quote }}
|
||||
{{- end }}
|
||||
{{- with .Values.extraEnvs }}
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: homebridge
|
||||
containerPort: {{ .Values.service.homebridgePort }}
|
||||
protocol: TCP
|
||||
{{- if .Values.config.enableUI }}
|
||||
- name: http
|
||||
containerPort: {{ .Values.service.httpPort }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /homebridge
|
||||
name: config
|
||||
- mountPath: /homebridge/startup.sh
|
||||
name: homebridge-configmap
|
||||
subPath: startup.sh
|
||||
{{- if .Values.config.enableUI }}
|
||||
{{- if .Values.probes.liveness.enabled }}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
|
||||
failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
|
||||
timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
|
||||
{{- end }}
|
||||
{{- if .Values.probes.readiness.enabled }}
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
|
||||
failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
|
||||
timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
|
||||
{{- end }}
|
||||
{{- if .Values.probes.startup.enabled }}
|
||||
startupProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
failureThreshold: {{ .Values.probes.startup.failureThreshold }}
|
||||
periodSeconds: {{ .Values.probes.startup.periodSeconds }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
volumes:
|
||||
- name: config
|
||||
{{- if .Values.persistence.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template "homebridge.fullname" . }}-config{{- end }}
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
- name: homebridge-configmap
|
||||
projected:
|
||||
defaultMode: 0444
|
||||
sources:
|
||||
- configMap:
|
||||
name: {{ template "homebridge.fullname" . }}
|
||||
items:
|
||||
- key: startup.sh
|
||||
path: startup.sh
|
||||
mode: 0755
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
39
charts/homebridge/templates/ingress.yaml
Normal file
@@ -0,0 +1,39 @@
|
||||
{{- if and .Values.config.enableUI .Values.ingress.enabled -}}
|
||||
{{- $fullName := include "homebridge.fullname" . -}}
|
||||
{{- $ingressPath := .Values.ingress.path -}}
|
||||
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
{{- else -}}
|
||||
apiVersion: extensions/v1beta1
|
||||
{{- end }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
labels:
|
||||
{{- include "homebridge.labels" . | nindent 4 }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ . | quote }}
|
||||
http:
|
||||
paths:
|
||||
- path: {{ $ingressPath }}
|
||||
backend:
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: http
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
25
charts/homebridge/templates/pvc.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ template "homebridge.fullname" . }}-config
|
||||
{{- if .Values.persistence.skipuninstall }}
|
||||
annotations:
|
||||
"helm.sh/resource-policy": keep
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "homebridge.labels" . | nindent 4 }}
|
||||
spec:
|
||||
accessModes:
|
||||
- {{ .Values.persistence.accessMode | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.persistence.size | quote }}
|
||||
{{- if .Values.persistence.storageClass }}
|
||||
{{- if (eq "-" .Values.persistence.storageClass) }}
|
||||
storageClassName: ""
|
||||
{{- else }}
|
||||
storageClassName: {{ .Values.persistence.storageClass | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
@@ -1,25 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "ombi.fullname" . }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "ombi.name" . }}
|
||||
helm.sh/chart: {{ include "ombi.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- if .Values.service.labels }}
|
||||
{{ toYaml .Values.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.service.annotations }}
|
||||
name: {{ include "homebridge.fullname" . }}
|
||||
{{- if .Values.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml . | indent 4 }}
|
||||
{{- end }}
|
||||
{{- toYaml .Values.service.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "homebridge.labels" . | nindent 4 }}
|
||||
spec:
|
||||
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
|
||||
type: ClusterIP
|
||||
{{- if .Values.service.clusterIP }}
|
||||
clusterIP: {{ .Values.service.clusterIP }}
|
||||
{{end}}
|
||||
{{- else if eq .Values.service.type "LoadBalancer" }}
|
||||
type: {{ .Values.service.type }}
|
||||
{{- if .Values.service.loadBalancerIP }}
|
||||
@@ -32,22 +23,26 @@ spec:
|
||||
{{- else }}
|
||||
type: {{ .Values.service.type }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.externalIPs }}
|
||||
{{- if .Values.service.externalIPs }}
|
||||
externalIPs:
|
||||
{{ toYaml .Values.service.externalIPs | indent 4 }}
|
||||
{{- end }}
|
||||
{{- toYaml .Values.service.externalIPs | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.externalTrafficPolicy }}
|
||||
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: http
|
||||
port: {{ .Values.service.port }}
|
||||
- port: {{ .Values.service.homebridgePort }}
|
||||
targetPort: homebridge
|
||||
protocol: TCP
|
||||
name: homebridge
|
||||
{{- if .Values.config.enableUI }}
|
||||
- port: {{ .Values.service.httpPort }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
{{- end }}
|
||||
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
|
||||
nodePort: {{.Values.service.nodePort}}
|
||||
{{ end }}
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "ombi.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
|
||||
{{- include "homebridge.selectorLabels" . | nindent 4 }}
|
||||
12
charts/homebridge/templates/serviceaccount.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "homebridge.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "homebridge.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
121
charts/homebridge/values.yaml
Normal file
@@ -0,0 +1,121 @@
|
||||
# Default values for homebridge.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: oznu/homebridge
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: "3.1.0"
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
timezone: "UTC"
|
||||
puid: 1000
|
||||
pgid: 1000
|
||||
|
||||
config:
|
||||
enableUI: true
|
||||
plugins: []
|
||||
# - homebridge-hue
|
||||
additionalPackages: []
|
||||
# - bash
|
||||
|
||||
extraEnvs: []
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
podSecurityContext: {}
|
||||
# fsGroup: 2000
|
||||
|
||||
securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
homebridgePort: 51826
|
||||
httpPort: 8080
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
path: /
|
||||
hosts:
|
||||
- chart-example.local
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
hostNetwork: false
|
||||
|
||||
persistence:
|
||||
enabled: true
|
||||
## homebridge data Persistent Volume Storage Class
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
##
|
||||
# storageClass: "-"
|
||||
##
|
||||
## If you want to reuse an existing claim, you can pass the name of the PVC using
|
||||
## the existingClaim variable
|
||||
# existingClaim: your-claim
|
||||
accessMode: ReadWriteOnce
|
||||
size: 1Gi
|
||||
## Do not delete the pvc upon helm uninstall
|
||||
skipuninstall: false
|
||||
|
||||
probes:
|
||||
liveness:
|
||||
enabled: true
|
||||
initialDelaySeconds: 60
|
||||
failureThreshold: 5
|
||||
timeoutSeconds: 10
|
||||
readiness:
|
||||
enabled: true
|
||||
initialDelaySeconds: 60
|
||||
failureThreshold: 5
|
||||
timeoutSeconds: 10
|
||||
startup:
|
||||
enabled: false
|
||||
failureThreshold: 30
|
||||
periodSeconds: 10
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
@@ -14,10 +14,10 @@
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
# OWNERS file for Kubernetes
|
||||
OWNERS
|
||||
.vscode/
|
||||
17
charts/intel-gpu-plugin/Chart.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
apiVersion: v2
|
||||
name: intel-gpu-plugin
|
||||
version: 1.0.0
|
||||
appVersion: 0.18.1
|
||||
description: The Intel GPU plugin facilitates offloading the processing of computation intensive workloads to GPU hardware
|
||||
keywords:
|
||||
- kubernetes
|
||||
- cluster
|
||||
- hardware
|
||||
- gpu
|
||||
home: https://github.com/k8s-at-home/charts/tree/master/charts/intel-gpu-plugin
|
||||
icon: https://avatars0.githubusercontent.com/u/17888862?s=400&v=4
|
||||
sources:
|
||||
- https://github.com/intel/intel-device-plugins-for-kubernetes/blob/master/cmd/gpu_plugin
|
||||
maintainers:
|
||||
- name: billimek
|
||||
email: jeff@billimek.com
|
||||
73
charts/intel-gpu-plugin/README.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# intel-gpu-plugin helm chart
|
||||
|
||||
This is a helm chart that will deploy [intel-gpu-plugin](https://github.com/intel/intel-device-plugins-for-kubernetes/blob/master/cmd/gpu_plugin) as a DaemonSet.
|
||||
|
||||
The GPU plugin facilitates offloading the processing of computation-intensive workloads to GPU hardware.
|
||||
|
||||
## TL;DR
|
||||
|
||||
```shell
|
||||
helm repo add k8s-at-home https://k8s-at-home.com/charts/
|
||||
helm install k8s-at-home/intel-gpu-plugin
|
||||
```
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```shell
|
||||
helm install my-release k8s-at-home/intel-gpu-plugin
|
||||
```
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-release` deployment:
|
||||
|
||||
```shell
|
||||
helm delete my-release --purge
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following table lists the configurable parameters of the intel-gpu-plugin chart and their default values.
|
||||
Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/intel-gpu-plugin/values.yaml) file. It has several commented out suggested values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
| ------------------------------------------- | -------------------------------------------------------------------------------------------- | ----------------------------------------------------- |
|
||||
| `image.repository` | Image repository | `intel/intel-gpu-plugin` |
|
||||
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/intel/intel-gpu-plugin/tags). | `0.18.1` |
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
|
||||
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
||||
|
||||
```shell
|
||||
helm install my-release \
|
||||
--set image.pullPolicy="Always" \
|
||||
k8s-at-home/intel-gpu-plugin
|
||||
```
|
||||
|
||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
||||
|
||||
```shell
|
||||
helm install my-release -f values.yaml k8s-at-home/intel-gpu-plugin
|
||||
```
|
||||
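An override file for the command above might look like the following minimal sketch; it only touches parameters from the table above, and the values shown are purely illustrative:

```yaml
# values.yaml -- illustrative overrides only
image:
  tag: "0.18.1"
  pullPolicy: Always
```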
|
||||
### Node Feature Discovery
|
||||
|
||||
If your cluster runs [Node Feature Discovery](https://github.com/k8s-at-home/charts/blob/master/charts/node-feature-discovery), you can deploy the device plugin only on nodes that have an Intel GPU by specifying the desired `nodeSelector` or `affinity` in your values. For example (make sure to replace the label below with the exact feature label reported on your nodes):
|
||||
|
||||
```yaml
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: feature.node.kubernetes.io/pci-0300_8086.present
|
||||
operator: In
|
||||
values:
|
||||
- "true"
|
||||
```
|
||||
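The section above also mentions `nodeSelector` as an alternative to `affinity`; a shorter, roughly equivalent sketch using the same example feature label would be:

```yaml
nodeSelector:
  feature.node.kubernetes.io/pci-0300_8086.present: "true"
```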
62
charts/intel-gpu-plugin/templates/_helpers.tpl
Normal file
@@ -0,0 +1,62 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "intel-gpu-plugin.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "intel-gpu-plugin.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "intel-gpu-plugin.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "intel-gpu-plugin.labels" -}}
|
||||
helm.sh/chart: {{ include "intel-gpu-plugin.chart" . }}
|
||||
{{ include "intel-gpu-plugin.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "intel-gpu-plugin.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "intel-gpu-plugin.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "intel-gpu-plugin.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "intel-gpu-plugin.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
71
charts/intel-gpu-plugin/templates/daemonset.yaml
Normal file
@@ -0,0 +1,71 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: {{ include "intel-gpu-plugin.fullname" . }}
|
||||
labels:
|
||||
{{- include "intel-gpu-plugin.labels" . | nindent 4 }}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "intel-gpu-plugin.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "intel-gpu-plugin.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "intel-gpu-plugin.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
volumeMounts:
|
||||
- name: devfs
|
||||
mountPath: /dev/dri
|
||||
readOnly: true
|
||||
- name: sysfs
|
||||
mountPath: /sys/class/drm
|
||||
readOnly: true
|
||||
- name: kubeletsockets
|
||||
mountPath: /var/lib/kubelet/device-plugins
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
volumes:
|
||||
- name: devfs
|
||||
hostPath:
|
||||
path: /dev/dri
|
||||
- name: sysfs
|
||||
hostPath:
|
||||
path: /sys/class/drm
|
||||
- name: kubeletsockets
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/device-plugins
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
12
charts/intel-gpu-plugin/templates/serviceaccount.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "intel-gpu-plugin.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "intel-gpu-plugin.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
50
charts/intel-gpu-plugin/values.yaml
Normal file
@@ -0,0 +1,50 @@
|
||||
# Default values for intel-gpu-plugin.
|
||||
|
||||
image:
|
||||
repository: intel/intel-gpu-plugin
|
||||
pullPolicy: IfNotPresent
|
||||
tag: 0.18.1
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
podSecurityContext: {}
|
||||
# fsGroup: 2000
|
||||
|
||||
securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
@@ -1,8 +1,8 @@
|
||||
apiVersion: v2
|
||||
appVersion: v0.13.446-ls55
|
||||
appVersion: v0.16.1045
|
||||
description: API Support for your favorite torrent trackers
|
||||
name: jackett
|
||||
version: 3.0.0
|
||||
version: 4.0.0
|
||||
keywords:
|
||||
- jackett
|
||||
- torrent
|
||||
@@ -14,3 +14,8 @@ sources:
|
||||
maintainers:
|
||||
- name: billimek
|
||||
email: jeff@billimek.com
|
||||
dependencies:
|
||||
- name: media-common
|
||||
repository: https://k8s-at-home.com/charts/
|
||||
version: ^1.0.0
|
||||
alias: jackett
|
||||
|
||||
@@ -28,78 +28,35 @@ helm delete my-release --purge
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following table lists the configurable parameters of the Jackett chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|----------------------------|-------------------------------------|---------------------------------------------------------|
|
||||
| `image.repository` | Image repository | `linuxserver/jackett` |
|
||||
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/jackett/tags/).| `v0.12.1132-ls37`|
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
|
||||
| `timezone` | Timezone the Jackett instance should run as, e.g. 'America/New_York' | `UTC` |
|
||||
| `puid` | process userID the Jackett instance should run as | `1001` |
|
||||
| `pgid` | process groupID the Jackett instance should run as | `1001` |
|
||||
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.liveness.periodSeconds` | Specify liveness `periodSeconds` parameter for the deployment | `10` |
|
||||
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.readiness.periodSeconds` | Specify readiness `periodSeconds` parameter for the deployment | `10` |
|
||||
| `probes.startup.initialDelaySeconds` | Specify startup `initialDelaySeconds` parameter for the deployment | `5` |
|
||||
| `probes.startup.failureThreshold` | Specify startup `failureThreshold` parameter for the deployment | `30` |
|
||||
| `probes.startup.periodSeconds` | Specify startup `periodSeconds` parameter for the deployment | `10` |
|
||||
| `Service.type` | Kubernetes service type for the Jackett GUI | `ClusterIP` |
|
||||
| `Service.port` | Kubernetes port where the Jackett GUI is exposed| `9117` |
|
||||
| `Service.annotations` | Service annotations for the Jackett GUI | `{}` |
|
||||
| `Service.labels` | Custom labels | `{}` |
|
||||
| `Service.loadBalancerIP` | Load balancer IP for the Jackett GUI | `{}` |
|
||||
| `Service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
|
||||
| `ingress.enabled` | Enables Ingress | `false` |
|
||||
| `ingress.annotations` | Ingress annotations | `{}` |
|
||||
| `ingress.labels` | Custom labels | `{}` |
|
||||
| `ingress.path` | Ingress path | `/` |
|
||||
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
|
||||
| `ingress.tls` | Ingress TLS configuration | `[]` |
|
||||
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
|
||||
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
|
||||
| `persistence.config.existingClaim`| Use an existing PVC to persist data | `nil` |
|
||||
| `persistence.config.subPath` | Mount a sub directory of the persistent volume if set | `""` |
|
||||
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
|
||||
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
|
||||
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
|
||||
| `persistence.torrentblackhole.enabled` | Use persistent volume to store torrent files | `false` |
|
||||
| `persistence.torrentblackhole.size` | Size of persistent volume claim | `1Gi` |
|
||||
| `persistence.torrentblackhole.existingClaim`| Use an existing PVC to persist data | `nil` |
|
||||
| `persistence.torrentblackhole.subPath` | Mount a sub directory of the persistent volume if set | `""` |
|
||||
| `persistence.torrentblackhole.storageClass` | Type of persistent volume claim | `-` |
|
||||
| `persistence.torrentblackhole.accessMode` | Persistence access mode | `ReadWriteOnce` |
|
||||
| `persistence.torrentblackhole.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
|
||||
| `persistence.extraExistingClaimMounts` | Optionally add multiple existing claims | `[]` |
|
||||
| `resources` | CPU/Memory resource requests/limits | `{}` |
|
||||
| `nodeSelector` | Node labels for pod assignment | `{}` |
|
||||
| `tolerations` | Toleration labels for pod assignment | `[]` |
|
||||
| `affinity` | Affinity settings for pod assignment | `{}` |
|
||||
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
|
||||
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |
|
||||
Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml)
|
||||
file. It has several commented out suggested values.
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
||||
|
||||
```console
|
||||
helm install --name my-release \
|
||||
--set timezone="America/New York" \
|
||||
helm install jackett \
|
||||
--set jackett.env.TZ="America/New_York" \
|
||||
k8s-at-home/jackett
|
||||
```
|
||||
|
||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
||||
|
||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the
|
||||
chart. For example,
|
||||
```console
|
||||
helm install --name my-release -f values.yaml k8s-at-home/jackett
|
||||
helm install jackett k8s-at-home/jackett --values values.yaml
|
||||
```
|
||||
|
||||
These values will be nested under the `jackett:` key, since the chart pulls in media-common as an aliased dependency. For example:
|
||||
```yaml
|
||||
jackett:
|
||||
image:
|
||||
tag: ...
|
||||
```
|
||||
|
||||
---
|
||||
**NOTE**
|
||||
|
||||
If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...` it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use `existingClaim`.
|
||||
If you get
|
||||
```console
|
||||
Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...
|
||||
```
|
||||
it may be because you previously uninstalled the chart with `skipuninstall` enabled; you need to manually delete the PVC or reuse it via `existingClaim`.
|
||||
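One way to clear the conflict is to remove the stale claim by hand before reinstalling. The commands below are a generic sketch; check the actual claim name with `kubectl get pvc` first:

```console
# list claims left behind by the previous release
kubectl get pvc --namespace <namespace>
# delete the stale claim (irreversible -- back up the data first if you still need it)
kubectl delete pvc <stale-claim-name> --namespace <namespace>
```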
|
||||
---
|
||||
|
||||
Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/jackett/values.yaml) file. It has several commented out suggested values.
|
||||
---
|
||||
10
charts/jackett/ci/ct-values.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
jackett:
|
||||
image:
|
||||
organization: linuxserver
|
||||
repository: jackett
|
||||
tag: v0.16.1045-ls14
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 9117
|
||||
ingress:
|
||||
enabled: false
|
||||
@@ -1,19 +1,20 @@
|
||||
{{- $svcPort := .Values.jackett.service.port -}}
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range .Values.ingress.hosts }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
|
||||
{{- if .Values.jackett.ingress.enabled }}
|
||||
{{- range .Values.jackett.ingress.hosts }}
|
||||
http{{ if $.Values.jackett.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.jackett.ingress.path }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "jackett.fullname" . }})
|
||||
{{- else if contains "NodePort" .Values.jackett.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "media-common.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
{{- else if contains "LoadBalancer" .Values.jackett.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of by running 'kubectl get svc -w {{ include "jackett.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "jackett.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "jackett.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit http://127.0.0.1:9117 to use your application"
|
||||
kubectl port-forward $POD_NAME 9117:80
|
||||
{{- end }}
|
||||
You can watch the status of by running 'kubectl get svc -w {{ include "media-common.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "media-common.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
echo http://$SERVICE_IP:{{ $svcPort }}
|
||||
{{- else if contains "ClusterIP" .Values.jackett.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "media-common.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl port-forward $POD_NAME 8080:{{ $svcPort }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
|
||||
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ template "jackett.fullname" . }}-config
|
||||
{{- if .Values.persistence.config.skipuninstall }}
|
||||
annotations:
|
||||
"helm.sh/resource-policy": keep
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "jackett.name" . }}
|
||||
helm.sh/chart: {{ include "jackett.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
- {{ .Values.persistence.config.accessMode | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.persistence.config.size | quote }}
|
||||
{{- if .Values.persistence.config.storageClass }}
|
||||
{{- if (eq "-" .Values.persistence.config.storageClass) }}
|
||||
storageClassName: ""
|
||||
{{- else }}
|
||||
storageClassName: "{{ .Values.persistence.config.storageClass }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
@@ -1,117 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "jackett.fullname" . }}
|
||||
{{- if .Values.deploymentAnnotations }}
|
||||
annotations:
|
||||
{{- range $key, $value := .Values.deploymentAnnotations }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "jackett.name" . }}
|
||||
helm.sh/chart: {{ include "jackett.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
strategy:
|
||||
type: {{ .Values.strategyType }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "jackett.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "jackett.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- if .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- range $key, $value := .Values.podAnnotations }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 9117
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: http
|
||||
failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
|
||||
periodSeconds: {{ .Values.probes.liveness.periodSeconds }}
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: http
|
||||
failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
|
||||
periodSeconds: {{ .Values.probes.readiness.periodSeconds }}
|
||||
startupProbe:
|
||||
tcpSocket:
|
||||
port: http
|
||||
initialDelaySeconds: {{ .Values.probes.startup.initialDelaySeconds }}
|
||||
failureThreshold: {{ .Values.probes.startup.failureThreshold }}
|
||||
periodSeconds: {{ .Values.probes.startup.periodSeconds }}
|
||||
env:
|
||||
- name: TZ
|
||||
value: "{{ .Values.timezone }}"
|
||||
- name: PUID
|
||||
value: "{{ .Values.puid }}"
|
||||
- name: PGID
|
||||
value: "{{ .Values.pgid }}"
|
||||
volumeMounts:
|
||||
- mountPath: /config
|
||||
name: config
|
||||
{{- if .Values.persistence.config.subPath }}
|
||||
subPath: "{{ .Values.persistence.config.subPath }}"
|
||||
{{- end }}
|
||||
- mountPath: /downloads
|
||||
name: torrentblackhole
|
||||
{{- if .Values.persistence.torrentblackhole.subPath }}
|
||||
subPath: "{{ .Values.persistence.torrentblackhole.subPath }}"
|
||||
{{- end }}
|
||||
{{- range .Values.persistence.extraExistingClaimMounts }}
|
||||
- name: {{ .name }}
|
||||
mountPath: {{ .mountPath }}
|
||||
readOnly: {{ .readOnly }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
volumes:
|
||||
- name: config
|
||||
{{- if .Values.persistence.config.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "jackett.fullname" . }}-config{{- end }}
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{ end }}
|
||||
- name: torrentblackhole
|
||||
{{- if .Values.persistence.torrentblackhole.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ if .Values.persistence.torrentblackhole.existingClaim }}{{ .Values.persistence.torrentblackhole.existingClaim }}{{- else }}{{ template "jackett.fullname" . }}-torrentblackhole{{- end }}
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- range .Values.persistence.extraExistingClaimMounts }}
|
||||
- name: {{ .name }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ .existingClaim }}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
22
charts/jackett/templates/pvc.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
{{- if and .Values.jackett.persistence.torrentblackhole.enabled (not .Values.jackett.persistence.torrentblackhole.existingClaim) }}
|
||||
---
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ template "media-common.fullname" . }}-downloads
|
||||
{{- if .Values.jackett.persistence.torrentblackhole.skipuninstall }}
|
||||
annotations:
|
||||
"helm.sh/resource-policy": keep
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "media-common.labels" . | nindent 4 }}
|
||||
spec:
|
||||
accessModes:
|
||||
- {{ .Values.jackett.persistence.torrentblackhole.accessMode | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.jackett.persistence.torrentblackhole.size | quote }}
|
||||
{{- if .Values.jackett.persistence.torrentblackhole.storageClass }}
|
||||
storageClassName: {{ if (eq "-" .Values.jackett.persistence.torrentblackhole.storageClass) }}""{{- else }}{{ .Values.jackett.persistence.torrentblackhole.storageClass | quote}}{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
@@ -1,53 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "jackett.fullname" . }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "jackett.name" . }}
|
||||
helm.sh/chart: {{ include "jackett.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- if .Values.service.labels }}
|
||||
{{ toYaml .Values.service.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml . | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
|
||||
type: ClusterIP
|
||||
{{- if .Values.service.clusterIP }}
|
||||
clusterIP: {{ .Values.service.clusterIP }}
|
||||
{{end}}
|
||||
{{- else if eq .Values.service.type "LoadBalancer" }}
|
||||
type: {{ .Values.service.type }}
|
||||
{{- if .Values.service.loadBalancerIP }}
|
||||
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.loadBalancerSourceRanges }}
|
||||
loadBalancerSourceRanges:
|
||||
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
|
||||
{{- end -}}
|
||||
{{- else }}
|
||||
type: {{ .Values.service.type }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.externalIPs }}
|
||||
externalIPs:
|
||||
{{ toYaml .Values.service.externalIPs | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.externalTrafficPolicy }}
|
||||
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: http
|
||||
port: {{ .Values.service.port }}
|
||||
protocol: TCP
|
||||
targetPort: http
|
||||
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
|
||||
nodePort: {{.Values.service.nodePort}}
|
||||
{{ end }}
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "jackett.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
|
||||
{{- if and .Values.persistence.torrentblackhole.enabled (not .Values.persistence.torrentblackhole.existingClaim) }}
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ template "jackett.fullname" . }}-torrentblackhole
|
||||
{{- if .Values.persistence.torrentblackhole.skipuninstall }}
|
||||
annotations:
|
||||
"helm.sh/resource-policy": keep
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "jackett.name" . }}
|
||||
helm.sh/chart: {{ include "jackett.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
- {{ .Values.persistence.torrentblackhole.accessMode | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.persistence.torrentblackhole.size | quote }}
|
||||
{{- if .Values.persistence.torrentblackhole.storageClass }}
|
||||
{{- if (eq "-" .Values.persistence.torrentblackhole.storageClass) }}
|
||||
storageClassName: ""
|
||||
{{- else }}
|
||||
storageClassName: "{{ .Values.persistence.torrentblackhole.storageClass }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
@@ -1,132 +1,43 @@
|
||||
# Default values for Jackett.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
image:
|
||||
repository: linuxserver/jackett
|
||||
tag: v0.13.446-ls55
|
||||
pullPolicy: IfNotPresent
|
||||
jackett:
|
||||
image:
|
||||
organization: linuxserver
|
||||
repository: jackett
|
||||
pullPolicy: IfNotPresent
|
||||
tag: v0.16.1045-ls14
|
||||
|
||||
# upgrade strategy type (e.g. Recreate or RollingUpdate)
|
||||
strategyType: Recreate
|
||||
service:
|
||||
port: 9117
|
||||
|
||||
# Probes configuration
|
||||
probes:
|
||||
liveness:
|
||||
failureThreshold: 5
|
||||
periodSeconds: 10
|
||||
readiness:
|
||||
failureThreshold: 5
|
||||
periodSeconds: 10
|
||||
startup:
|
||||
initialDelaySeconds: 5
|
||||
failureThreshold: 30
|
||||
periodSeconds: 10
|
||||
env: {}
|
||||
# TZ: UTC
|
||||
# PUID: 1001
|
||||
# PGID: 1001
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
persistence:
|
||||
torrentblackhole:
|
||||
enabled: false
|
||||
## Jackett torrentblackhole Persistent Volume Storage Class
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
# storageClass: "-"
|
||||
# accessMode: ReadWriteOnce
|
||||
# size: 1Gi
|
||||
## Do not delete the pvc upon helm uninstall
|
||||
# skipuninstall: false
|
||||
# existingClaim: ""
|
||||
|
||||
timezone: UTC
|
||||
puid: 1001
|
||||
pgid: 1001
|
||||
additionalVolumes:
|
||||
- name: torrentblackhole
|
||||
emptyDir: {}
|
||||
## When using persistence.torrentblackhole.enabled: true, adjust this to:
|
||||
# persistentVolumeClaim:
|
||||
# claimName: jackett-torrentblackhole
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 9117
|
||||
## Specify the nodePort value for the LoadBalancer and NodePort service types.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
##
|
||||
# nodePort:
|
||||
## Provide any additional annotations which may be required. This can be used to
|
||||
## set the LoadBalancer service type to internal only.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
|
||||
##
|
||||
annotations: {}
|
||||
labels: {}
|
||||
## Use loadBalancerIP to request a specific static IP,
|
||||
## otherwise leave blank
|
||||
##
|
||||
loadBalancerIP:
|
||||
# loadBalancerSourceRanges: []
|
||||
## Set the externalTrafficPolicy in the Service to either Cluster or Local
|
||||
# externalTrafficPolicy: Cluster
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
labels: {}
|
||||
path: /
|
||||
hosts:
|
||||
- chart-example.local
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
persistence:
|
||||
config:
|
||||
enabled: true
|
||||
## Jackett configuration data Persistent Volume Storage Class
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
##
|
||||
# storageClass: "-"
|
||||
##
|
||||
## If you want to reuse an existing claim, you can pass the name of the PVC using
|
||||
## the existingClaim variable
|
||||
# existingClaim: your-claim
|
||||
accessMode: ReadWriteOnce
|
||||
size: 1Gi
|
||||
|
||||
## If subPath is set mount a sub folder of a volume instead of the root of the volume.
|
||||
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
|
||||
##
|
||||
subPath: ""
|
||||
## Do not delete the pvc upon helm uninstall
|
||||
skipuninstall: false
|
||||
torrentblackhole:
|
||||
enabled: false
|
||||
## Jackett torrentblackhole directory volume configuration
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
##
|
||||
# storageClass: "-"
|
||||
##
|
||||
## If you want to reuse an existing claim, you can pass the name of the PVC using
|
||||
## the existingClaim variable
|
||||
# existingClaim: your-claim
|
||||
# subPath: some-subpath
|
||||
accessMode: ReadWriteOnce
|
||||
size: 1Gi
|
||||
## Do not delete the pvc upon helm uninstall
|
||||
skipuninstall: false
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
deploymentAnnotations: {}
|
||||
additionalVolumeMounts:
|
||||
- name: torrentblackhole
|
||||
mountPath: /downloads
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
apiVersion: v2
|
||||
appVersion: v10.5.3-ls45
|
||||
appVersion: 10.6.3
|
||||
description: Jellyfin is a Free Software Media System
|
||||
name: jellyfin
|
||||
version: 2.0.0
|
||||
version: 2.0.1
|
||||
keywords:
|
||||
- Jellyfin
|
||||
- mediaplayer
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
image:
|
||||
repository: linuxserver/jellyfin
|
||||
tag: v10.5.3-ls45
|
||||
tag: 10.6.3-1-ls70
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
# upgrade strategy type (e.g. Recreate or RollingUpdate)
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
apiVersion: v2
|
||||
name: lazylibrarian
|
||||
description: A Helm chart for deploying LazyLibrarian
|
||||
version: 2.0.0
|
||||
appVersion: 581cdfb3-ls23
|
||||
version: 2.0.1
|
||||
appVersion: 1.7.2
|
||||
keywords:
|
||||
- lazylibrarian
|
||||
- ebooks
|
||||
|
||||
@@ -13,7 +13,7 @@ image:
|
||||
repository: linuxserver/lazylibrarian
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: 581cdfb3-ls23
|
||||
tag: 2551a8bc-ls25
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
|
||||
@@ -1,17 +1,31 @@
|
||||
apiVersion: v2
|
||||
appVersion: 0.7.1.1381-ls7
|
||||
description: Looks and smells like Sonarr but made for music.
|
||||
name: lidarr
|
||||
version: 3.0.0
|
||||
description: Looks and smells like Sonarr but made for music
|
||||
type: application
|
||||
version: 4.0.1
|
||||
appVersion: 0.7.1.1785-ls18
|
||||
keywords:
|
||||
- lidarr
|
||||
- usenet
|
||||
- bittorrent
|
||||
home: https://github.com/k8s-at-home/charts/tree/master/charts/lidarr
|
||||
icon: https://lidarr.audio/img/logo.png
|
||||
icon: https://github.com/lidarr/Lidarr/blob/develop/Logo/512.png?raw=true
|
||||
sources:
|
||||
- https://hub.docker.com/r/linuxserver/lidarr/
|
||||
- https://github.com/lidarr/Lidarr/
|
||||
- https://github.com/Lidarr/Lidarr
|
||||
- https://hub.docker.com/r/linuxserver/lidarr
|
||||
maintainers:
|
||||
- name: billimek
|
||||
email: jeff@billimek.com
|
||||
- name: DirtyCajunRice
|
||||
email: nick@cajun.pro
|
||||
url: https://github.com/dirtycajunrice
|
||||
dependencies:
|
||||
- name: media-common
|
||||
repository: https://k8s-at-home.com/charts/
|
||||
version: ^1.0.0
|
||||
alias: lidarr
|
||||
annotations:
|
||||
artifacthub.io/links: |
|
||||
- name: App Source
|
||||
url: https://github.com/Lidarr/Lidarr
|
||||
- name: Default Docker Image
|
||||
url: https://hub.docker.com/r/linuxserver/lidarr
|
||||
artifacthub.io/maintainers: |
|
||||
- name: Nicholas St. Germain
|
||||
email: nick@cajun.pro
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
approvers:
|
||||
- billimek
|
||||
- DirtyCajunRice
|
||||
reviewers:
|
||||
- billimek
|
||||
- DirtyCajunRice
|
||||
|
||||
@@ -1,115 +1,79 @@
|
||||
# lidarr music download client
|
||||
# Lidarr | Looks and smells like Sonarr but made for music
|
||||
Umbrella chart that:
|
||||
* Uses [media-common](https://github.com/k8s-at-home/charts/tree/master/charts/media-common) as a base
|
||||
* Adds docker image information leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/lidarr/)
|
||||
* Deploys [Lidarr](https://github.com/lidarr/Lidarr)
|
||||
|
||||
This is a helm chart for [lidarr](https://github.com/lidarr/Lidarr) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/lidarr/)
|
||||
|
||||
## TL;DR;
|
||||
|
||||
```shell
|
||||
## TL;DR
|
||||
```console
|
||||
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
|
||||
$ helm install k8s-at-home/lidarr
|
||||
```
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
To install the chart with the release name `lidarr`:
|
||||
```console
|
||||
helm install --name my-release k8s-at-home/lidarr
|
||||
helm install lidarr k8s-at-home/lidarr
|
||||
```
|
||||
|
||||
## Upgrading
|
||||
Chart versions before 4.0.0 did not use media-common. Upgrading will require you to nest your values.yaml file under
|
||||
a top-level `lidarr:` key.
|
||||
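A minimal sketch of what that nesting looks like (the keys and values shown are only examples):

```yaml
lidarr:
  image:
    tag: 0.7.1.1785-ls18
  env:
    TZ: America/New_York
```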
|
||||
Chart versions 1.0.1 and earlier used separate PVCs for Downloads and Music. This presented an issue where Lidarr would be unable to hard-link files between the /downloads and /music directories when importing media. This is caused because each PVC is exposed to the pod as a separate filesystem. This resulted in Lidarr copying files rather than linking; using additional storage without the user's knowledge.
|
||||
Chart versions 1.0.1 and earlier used separate PVCs for Downloads and Music. This presented an issue where Lidarr would
|
||||
be unable to hard-link files between the /downloads and /music directories when importing media. This is caused because
|
||||
each PVC is exposed to the pod as a separate filesystem. This resulted in Lidarr copying files rather than linking,
|
||||
using additional storage without the user's knowledge.
|
||||
|
||||
This chart now uses a single PVC for Downloads and Music. This means all of your media (and downloads) must be in, or be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following:
|
||||
This chart now uses a single PVC for Downloads and Music. This means all of your media (and downloads) must be in, or
|
||||
be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following:
|
||||
|
||||
1. [Uninstall](#uninstalling-the-chart) your current release
|
||||
2. On your backing store, organize your media, e.g. media/music, media/downloads
|
||||
3. If using a pre-existing PVC, create a single new PVC for all of your media (see the sketch after this list)
|
||||
4. Refer to the [configuration](#configuration) for updates to the chart values
|
||||
5. Re-install the chart
|
||||
6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Lidarr's `Mass Editor` under the `Library` tab. Simply select all artists in your library, and use the editor to change the `Root Folder` and hit save.
|
||||
6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Lidarr's
|
||||
`Mass Editor` under the `Library` tab. Simply select all artists in your library, and use the editor to change the
|
||||
`Root Folder` and hit save.
|
||||
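For step 3, a minimal sketch of a single claim holding all media, which you could then point the chart at as an existing claim (the name and size are only examples):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: lidarr-media        # example name
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi         # size it for your library
```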
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-release` deployment:
|
||||
|
||||
To uninstall the `lidarr` deployment:
|
||||
```console
|
||||
helm delete my-release --purge
|
||||
helm uninstall lidarr
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following table lists the configurable parameters of the Lidarr chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|----------------------------|-------------------------------------|---------------------------------------------------------|
|
||||
| `image.repository` | Image repository | `linuxserver/lidarr` |
|
||||
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/lidarr/tags/).| `0.7.1.1381-ls7`|
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
|
||||
| `timezone` | Timezone the lidarr instance should run as, e.g. 'America/New_York' | `UTC` |
|
||||
| `puid` | process userID the lidarr instance should run as | `1001` |
|
||||
| `pgid` | process groupID the lidarr instance should run as | `1001` |
|
||||
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
|
||||
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
|
||||
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
|
||||
| `Service.type` | Kubernetes service type for the lidarr GUI | `ClusterIP` |
|
||||
| `Service.port` | Kubernetes port where the lidarr GUI is exposed| `8686` |
|
||||
| `Service.annotations` | Service annotations for the lidarr GUI | `{}` |
|
||||
| `Service.labels` | Custom labels | `{}` |
|
||||
| `Service.loadBalancerIP` | Load balancer IP for the lidarr GUI | `{}` |
|
||||
| `Service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
|
||||
| `ingress.enabled` | Enables Ingress | `false` |
|
||||
| `ingress.annotations` | Ingress annotations | `{}` |
|
||||
| `ingress.labels` | Custom labels | `{}` |
|
||||
| `ingress.path` | Ingress path | `/` |
|
||||
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
|
||||
| `ingress.tls` | Ingress TLS configuration | `[]` |
|
||||
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
|
||||
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
|
||||
| `persistence.config.existingClaim`| Use an existing PVC to persist data | `nil` |
|
||||
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
|
||||
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
|
||||
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
|
||||
| `persistence.media.enabled` | Use persistent volume to store configuration data | `true` |
|
||||
| `persistence.media.size` | Size of persistent volume claim | `10Gi` |
|
||||
| `persistence.media.existingClaim`| Use an existing PVC to persist data | `nil` |
|
||||
| `persistence.media.storageClass` | Type of persistent volume claim | `-` |
|
||||
| `persistence.media.accessMode` | Persistence access mode | `ReadWriteOnce` |
|
||||
| `persistence.media.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
|
||||
| `persistence.extraExistingClaimMounts` | Optionally add multiple existing claims | `[]` |
|
||||
| `resources` | CPU/Memory resource requests/limits | `{}` |
|
||||
| `nodeSelector` | Node labels for pod assignment | `{}` |
|
||||
| `tolerations` | Toleration labels for pod assignment | `[]` |
|
||||
| `affinity` | Affinity settings for pod assignment | `{}` |
|
||||
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
|
||||
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |
|
||||
Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml)
|
||||
file. It has several commented out suggested values.
|
||||
|
||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
||||
|
||||
```console
|
||||
helm install --name my-release \
|
||||
--set timezone="America/New York" \
|
||||
helm install lidarr \
|
||||
--set lidarr.env.TZ="America/New_York" \
|
||||
k8s-at-home/lidarr
|
||||
```
|
||||
|
||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
||||
|
||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the
|
||||
chart. For example,
|
||||
```console
|
||||
helm install --name my-release -f values.yaml stable/lidarr
|
||||
helm install lidarr k8s-at-home/lidarr --values values.yaml
|
||||
```
|
||||
|
||||
These values will be nested under the `lidarr:` key, since the chart pulls in media-common as an aliased dependency. For example:
|
||||
```yaml
|
||||
lidarr:
|
||||
image:
|
||||
tag: ...
|
||||
```
|
||||
|
||||
---
|
||||
**NOTE**
|
||||
|
||||
If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...` it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use `existingClaim`.
|
||||
If you get
|
||||
```console
|
||||
Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...
|
||||
```
|
||||
it may be because you previously uninstalled the chart with `skipuninstall` enabled; you need to manually delete the PVC or reuse it via `existingClaim`.
|
||||
|
||||
---
|
||||
|
||||
Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/lidarr/values.yaml) file. It has several commented out suggested values.
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
|
||||
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ template "lidarr.fullname" . }}-config
|
||||
{{- if .Values.persistence.config.skipuninstall }}
|
||||
annotations:
|
||||
"helm.sh/resource-policy": keep
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "lidarr.name" . }}
|
||||
helm.sh/chart: {{ include "lidarr.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
- {{ .Values.persistence.config.accessMode | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.persistence.config.size | quote }}
|
||||
{{- if .Values.persistence.config.storageClass }}
|
||||
{{- if (eq "-" .Values.persistence.config.storageClass) }}
|
||||
storageClassName: ""
|
||||
{{- else }}
|
||||
storageClassName: "{{ .Values.persistence.config.storageClass }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
@@ -1,110 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "lidarr.fullname" . }}
|
||||
{{- if .Values.deploymentAnnotations }}
|
||||
annotations:
|
||||
{{- range $key, $value := .Values.deploymentAnnotations }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "lidarr.name" . }}
|
||||
helm.sh/chart: {{ include "lidarr.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 3
|
||||
strategy:
|
||||
type: {{ .Values.strategyType }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "lidarr.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "lidarr.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- if .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- range $key, $value := .Values.podAnnotations }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8686
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: http
|
||||
initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
|
||||
failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
|
||||
timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: http
|
||||
initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
|
||||
failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
|
||||
timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
|
||||
env:
|
||||
- name: TZ
|
||||
value: "{{ .Values.timezone }}"
|
||||
- name: PUID
|
||||
value: "{{ .Values.puid }}"
|
||||
- name: PGID
|
||||
value: "{{ .Values.pgid }}"
|
||||
volumeMounts:
|
||||
- mountPath: /config
|
||||
name: config
|
||||
- mountPath: /media
|
||||
name: media
|
||||
{{- if .Values.persistence.media.subPath }}
|
||||
subPath: {{ .Values.persistence.media.subPath }}
|
||||
{{- end }}
|
||||
{{- range .Values.persistence.extraExistingClaimMounts }}
|
||||
- name: {{ .name }}
|
||||
mountPath: {{ .mountPath }}
|
||||
readOnly: {{ .readOnly }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
volumes:
|
||||
- name: config
|
||||
{{- if .Values.persistence.config.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "lidarr.fullname" . }}-config{{- end }}
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
- name: media
|
||||
{{- if .Values.persistence.media.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ if .Values.persistence.media.existingClaim }}{{ .Values.persistence.media.existingClaim }}{{- else }}{{ template "lidarr.fullname" . }}-media{{- end }}
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- range .Values.persistence.extraExistingClaimMounts }}
|
||||
- name: {{ .name }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ .existingClaim }}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
@@ -1,41 +0,0 @@
|
||||
{{- if .Values.ingress.enabled -}}
|
||||
{{- $fullName := include "lidarr.fullname" . -}}
|
||||
{{- $ingressPath := .Values.ingress.path -}}
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "lidarr.name" . }}
|
||||
helm.sh/chart: {{ include "lidarr.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- with .Values.ingress.labels -}}
|
||||
{{ toYaml . | nindent 4 }}
|
||||
{{- end -}}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{ toYaml . | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ . | quote }}
|
||||
http:
|
||||
paths:
|
||||
- path: {{ $ingressPath }}
|
||||
backend:
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: http
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,29 +0,0 @@
|
||||
|
||||
{{- if and .Values.persistence.media.enabled (not .Values.persistence.media.existingClaim) }}
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ template "lidarr.fullname" . }}-media
|
||||
{{- if .Values.persistence.media.skipuninstall }}
|
||||
annotations:
|
||||
"helm.sh/resource-policy": keep
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "lidarr.name" . }}
|
||||
helm.sh/chart: {{ include "lidarr.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
- {{ .Values.persistence.media.accessMode | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.persistence.media.size | quote }}
|
||||
{{- if .Values.persistence.media.storageClass }}
|
||||
{{- if (eq "-" .Values.persistence.media.storageClass) }}
|
||||
storageClassName: ""
|
||||
{{- else }}
|
||||
storageClassName: "{{ .Values.persistence.media.storageClass }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
@@ -1,52 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "lidarr.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "lidarr.name" . }}
helm.sh/chart: {{ include "lidarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
type: ClusterIP
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
type: {{ .Values.service.type }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
{{- end -}}
{{- else }}
type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
ports:
- name: http
port: {{ .Values.service.port }}
protocol: TCP
targetPort: http
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{.Values.service.nodePort}}
{{ end }}
selector:
app.kubernetes.io/name: {{ include "lidarr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
@@ -1,132 +1,10 @@
# Default values for lidarr.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
repository: linuxserver/lidarr
tag: 0.7.1.1381-ls7
pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate

# Probes configuration
probes:
liveness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
readiness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10

nameOverride: ""
fullnameOverride: ""

timezone: UTC
puid: 1001
pgid: 1001

service:
type: ClusterIP
port: 8686
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
## Use loadBalancerIP to request a specific static IP,
## otherwise leave blank
##
loadBalancerIP:
# loadBalancerSourceRanges: []
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster

ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local

persistence:
config:
enabled: true
## lidarr configuration data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
accessMode: ReadWriteOnce
size: 1Gi
## Do not delete the pvc upon helm uninstall
skipuninstall: false
media:
enabled: true
## lidarr media volume configuration
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
# subPath: some-subpath
accessMode: ReadWriteOnce
size: 10Gi
## Do not delete the pvc upon helm uninstall
skipuninstall: false
extraExistingClaimMounts: []
# - name: external-mount
# mountPath: /srv/external-mount
## A manually managed Persistent Volume and Claim
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
# readOnly: true

resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

podAnnotations: {}

deploymentAnnotations: {}
lidarr:
image:
organization: linuxserver
repository: lidarr
pullPolicy: IfNotPresent
tag: 0.7.1.1785-ls18
service:
port: 8686
@@ -14,10 +14,10 @@
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
.vscode/
12 charts/media-common-openvpn/Chart.yaml Normal file
@@ -0,0 +1,12 @@
apiVersion: v2
name: media-common-openvpn
description: OpenVPN add-on for `media-common`-based charts
type: library
keywords:
- media-common
- openvpn
home: https://github.com/k8s-at-home/charts/tree/master/charts/media-common-openvpn
maintainers:
- name: bjw-s
email: bjw-s@users.noreply.github.com
version: 1.0.1
16 charts/media-common-openvpn/README.md Normal file
@@ -0,0 +1,16 @@
# Add-on chart for k8s@home media charts

This chart provides a single maintainable OpenVPN add-on to the `media-common` chart.

## Configuration

Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common-openvpn/values.yaml) file.
It has several commented-out suggested values.

These values will normally be nested as it is a dependency, for example:
```yaml
radarr:
  openvpn:
    enabled: true
    <values>
```
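A more concrete illustration of this nesting, as a sketch only: the remote host, credentials, and timezone below are placeholders, and the keys used (`env`, `vpnConf`, `auth`, `enabled`) are the ones documented in this chart's values.yaml.

```yaml
radarr:
  openvpn:
    enabled: true
    env:
      TZ: UTC
    # Placeholder OpenVPN client config; substitute your provider's config file
    vpnConf: |-
      remote vpn.example.com 1194
      auth-user-pass
    # Inline credentials in "user;password" form; an existing secret can be
    # referenced through authSecret instead
    auth: "user;password"
```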
24 charts/media-common-openvpn/templates/_configmap.tpl Normal file
@@ -0,0 +1,24 @@
{{/*
The OpenVPN configmaps to be inserted
*/}}
{{- define "media-common.openvpn.configmap" -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "media-common.fullname" . }}-openvpn
labels:
{{- include "media-common.labels" . | nindent 4 }}
data:
{{- if .Values.openvpn.vpnConf }}
vpnConf: |-
{{- .Values.openvpn.vpnConf | nindent 4}}
{{- end }}
{{ if .Values.openvpn.scripts.up }}
up.sh: |-
{{- .Values.openvpn.scripts.up | nindent 4}}
{{- end }}
{{- if .Values.openvpn.scripts.down }}
down.sh: |-
{{- .Values.openvpn.scripts.down | nindent 4}}
{{- end }}
{{- end -}}
48 charts/media-common-openvpn/templates/_container.tpl Normal file
@@ -0,0 +1,48 @@
{{/*
The OpenVPN container(s) to be inserted
*/}}
{{- define "media-common.openvpn.container" -}}
- name: openvpn
image: "{{ .Values.openvpn.image.repository }}:{{ .Values.openvpn.image.tag }}"
imagePullPolicy: {{ .Values.openvpn.image.pullPolicy }}
securityContext:
capabilities:
add: ["NET_ADMIN"]
{{- if .Values.openvpn.env }}
env:
{{- range $k, $v := .Values.openvpn.env }}
- name: {{ $k }}
value: {{ $v }}
{{- end }}
{{- end }}
envFrom:
{{- if or .Values.openvpn.auth .Values.openvpn.authSecret }}
- secretRef:
{{- if .Values.openvpn.authSecret }}
name: {{ .Values.openvpn.authSecret }}
{{- else }}
name: {{ template "media-common.fullname" . }}-openvpn
{{- end }}
{{- end }}
volumeMounts:
{{- if .Values.openvpn.vpnConf }}
- name: openvpnconf
mountPath: /vpn/vpn.conf
subPath: vpnConf
{{- end }}
{{- if .Values.openvpn.scripts.up }}
- name: openvpnconf
mountPath: /vpn/up.sh
subPath: up.sh
{{- end }}
{{- if .Values.openvpn.scripts.down }}
- name: openvpnconf
mountPath: /vpn/down.sh
subPath: down.sh
{{- end }}
{{- if .Values.openvpn.additionalVolumeMounts }}
{{- toYaml .Values.openvpn.additionalVolumeMounts | nindent 2 }}
{{- end }}
livenessProbe:
{{- toYaml .Values.openvpn.livenessProbe | nindent 4 }}
{{- end -}}
@@ -1,12 +1,16 @@
{{- if .Values.openvpn.networkPolicy.enabled }}
{{/*
The OpenVPN networkpolicy to be inserted
*/}}
{{- define "media-common.openvpn.networkpolicy" -}}
{{- if .Values.openvpn.networkPolicy.enabled -}}
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: {{ template "nzbget.fullname" . }}-deny-all-netpol
name: {{ template "media-common.fullname" . }}-deny-all-netpol
spec:
podSelector:
matchLabels:
app.kubernetes.io/name: {{ include "nzbget.name" . }}
app.kubernetes.io/name: {{ include "media-common.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
policyTypes:
- Egress
@@ -14,4 +18,5 @@ spec:
{{- if .Values.openvpn.networkPolicy.egress }}
{{- .Values.openvpn.networkPolicy.egress | toYaml | nindent 4 }}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
15 charts/media-common-openvpn/templates/_secret.tpl Normal file
@@ -0,0 +1,15 @@
{{/*
The OpenVPN secrets to be inserted
*/}}
{{- define "media-common.openvpn.secret" -}}
{{- if .Values.openvpn.auth -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "media-common.fullname" . }}-openvpn
labels:
{{- include "media-common.labels" . | nindent 4 }}
data:
VPN_AUTH: {{ .Values.openvpn.auth | b64enc }}
{{- end -}}
{{- end -}}
25 charts/media-common-openvpn/templates/_volume.tpl Normal file
@@ -0,0 +1,25 @@
{{/*
The OpenVPN volumes to be inserted
*/}}
{{- define "media-common.openvpn.volume" -}}
{{- if or .Values.openvpn.vpnConf .Values.openvpn.scripts.up .Values.openvpn.scripts.down -}}
- name: openvpnconf
configMap:
name: {{ template "media-common.fullname" . }}-openvpn
items:
{{- if .Values.openvpn.vpnConf }}
- key: vpnConf
path: vpnConf
{{- end }}
{{- if .Values.openvpn.scripts.up }}
- key: up.sh
path: up.sh
mode: 0777
{{- end }}
{{- if .Values.openvpn.scripts.down }}
- key: down.sh
path: down.sh
mode: 0777
{{- end }}
{{- end -}}
{{- end -}}
67 charts/media-common-openvpn/values.yaml Normal file
@@ -0,0 +1,67 @@
# Default values for media-common-openvpn.

image:
repository: dperson/openvpn-client
tag: latest
pullPolicy: IfNotPresent

# All variables specified here will be added to the openvpn sidecar container
# Ref https://hub.docker.com/r/dperson/openvpn-client for all config values
env: []
# TZ: UTC

# Provide a customized vpn.conf file to be used by openvpn.
vpnConf: # |-
# Some Example Config
# remote greatvpnhost.com 8888
# auth-user-pass
# Cipher AES

# Provide custom up/down scripts that can be used by the vpnConf
scripts:
up: # |-
# #!/bin/bash
# echo "connected" > /shared/vpnstatus
down: # |-
# #!/bin/bash
# echo "disconnected" > /shared/vpnstatus

# Credentials to connect to the VPN Service (used with -a)
auth: # "user;password"
# OR specify an existing secret that contains the credentials. Credentials should be stored
# under the VPN_AUTH key
authSecret: # my-vpn-secret

additionalVolumeMounts: []

# Optionally specify a livenessProbe, e.g. to check if the connection is still
# being protected by the VPN
livenessProbe: {}
# exec:
# command:
# - sh
# - -c
# - if [ $(curl -s https://ipinfo.io/country) == 'US' ]; then exit 0; else exit $?; fi
# initialDelaySeconds: 30
# periodSeconds: 60
# failureThreshold: 1

# If set to true, will deploy a network policy that blocks all outbound
# traffic except traffic specified as allowed
networkPolicy:
enabled: false

# The egress configuration for your network policy. All outbound traffic
# from the pod will be blocked unless specified here. Your cluster must
# have a CNI that supports network policies (Canal, Calico, etc.)
# https://kubernetes.io/docs/concepts/services-networking/network-policies/
# https://github.com/ahmetb/kubernetes-network-policy-recipes
egress:
# - to:
# - ipBlock:
# cidr: 0.0.0.0/0
# ports:
# - port: 53
# protocol: UDP
# - port: 53
# protocol: TCP
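The `authSecret` option above only references an existing secret; this chart does not create it. A minimal sketch of such a secret, assuming the `my-vpn-secret` name from the commented example and using `stringData` so the value does not need to be base64-encoded by hand:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-vpn-secret
type: Opaque
stringData:
  # Same "user;password" format expected by the chart's auth value
  VPN_AUTH: "user;password"
```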
17 charts/media-common/Chart.yaml Normal file
@@ -0,0 +1,17 @@
apiVersion: v2
name: media-common
description: Common dependency chart for media ecosystem containers
type: application
version: 1.3.0
keywords:
- media-common
home: https://github.com/k8s-at-home/charts/tree/master/charts/media-common
maintainers:
- name: DirtyCajunRice
email: nick@cajun.pro
dependencies:
- name: media-common-openvpn
repository: https://k8s-at-home.com/charts/
version: ^1.0.0
condition: openvpn.enabled
alias: openvpn
4 charts/media-common/OWNERS Normal file
@@ -0,0 +1,4 @@
approvers:
- DirtyCajunRice
reviewers:
- DirtyCajunRice
30 charts/media-common/README.md Normal file
@@ -0,0 +1,30 @@
# Shared base chart for k8s@home media charts

Many containers have no environmentally configurable settings. This chart allows a single maintainable
base with umbrella charts for container-specific differences. This chart does not have a default
repository or tag, and is not designed to be deployed directly.

## Known Parent Charts

* [k8s-at-home/radarr](https://github.com/k8s-at-home/charts/tree/master/charts/radarr)
* [k8s-at-home/sonarr](https://github.com/k8s-at-home/charts/tree/master/charts/sonarr)
* [k8s-at-home/lidarr](https://github.com/k8s-at-home/charts/tree/master/charts/lidarr)
* [k8s-at-home/tautulli](https://github.com/k8s-at-home/charts/tree/master/charts/tautulli)
* [k8s-at-home/ombi](https://github.com/k8s-at-home/charts/tree/master/charts/ombi)
* [k8s-at-home/organizr](https://github.com/k8s-at-home/charts/tree/master/charts/organizr)

## Configuration

Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml) file.
It has several commented-out suggested values.

These values will normally be nested as it is a dependency, for example:
```yaml
radarr:
  <values>
```

## Add-ons

### OpenVPN
It is possible to enable an OpenVPN add-on by setting `openvpn.enabled: true`. For more information refer to [k8s-at-home/media-common-openvpn](https://github.com/k8s-at-home/charts/tree/master/charts/media-common-openvpn)
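Putting the Configuration and Add-ons sections together, a parent chart such as radarr would nest both the common values and the add-on toggle under its own key. This is only a sketch; the image and port values are illustrative and mirror the ci/ct-values.yaml file shown later in this diff.

```yaml
radarr:
  image:
    organization: linuxserver
    repository: radarr
    tag: latest
  service:
    port: 7878
  # Enable the OpenVPN add-on provided by media-common-openvpn
  openvpn:
    enabled: true
```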
35 charts/media-common/ci/ct-values.yaml Normal file
@@ -0,0 +1,35 @@
---
image:
organization: linuxserver
repository: radarr
tag: latest
service:
port: 7878

openvpn:
enabled: true

image:
repository: dperson/openvpn-client
tag: latest
pullPolicy: IfNotPresent

auth: user;pass

env:
TZ: UTC

scripts:
up:
down:

networkPolicy:
enabled: false

livenessProbe:
initialDelaySeconds: 10
periodSeconds: 10
exec:
command:
- echo
- success
@@ -4,16 +4,16 @@
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "radarr.fullname" . }})
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "media-common.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get svc -w {{ include "radarr.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "radarr.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
You can watch the status of by running 'kubectl get svc -w {{ include "media-common.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "media-common.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "radarr.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "media-common.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}
85 charts/media-common/templates/_helpers.tpl Normal file
@@ -0,0 +1,85 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "media-common.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "media-common.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "media-common.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "media-common.labels" -}}
helm.sh/chart: {{ include "media-common.chart" . }}
{{ include "media-common.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "media-common.selectorLabels" -}}
app.kubernetes.io/name: {{ include "media-common.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Init Containers
*/}}
{{- define "media-common.initContainers" -}}
{{- if .Values.initContainers }}
{{- toYaml .Values.initContainers }}
{{- end }}
{{- end -}}

{{/*
Additional Containers
*/}}
{{- define "media-common.additionalContainers" -}}
{{- if .Values.additionalContainers }}
{{- toYaml .Values.additionalContainers }}
{{- end }}
{{- if .Values.openvpn.enabled }}
{{ include "media-common.openvpn.container" . }}
{{- end }}
{{- end -}}

{{/*
Additional Volumes
*/}}
{{- define "media-common.additionalVolumes" -}}
{{- if .Values.additionalVolumes }}
{{- toYaml .Values.additionalVolumes }}
{{- end }}
{{- if .Values.openvpn.enabled }}
{{ include "media-common.openvpn.volume" . }}
{{- end }}
{{- end -}}
8 charts/media-common/templates/addon-openvpn.yaml Normal file
@@ -0,0 +1,8 @@
{{- if .Values.openvpn.enabled -}}
---
{{ include "media-common.openvpn.configmap" . }}
---
{{ include "media-common.openvpn.secret" . }}
---
{{ include "media-common.openvpn.networkpolicy" . }}
{{- end -}}
10 charts/media-common/templates/configmap.yaml Normal file
@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "media-common.fullname" . }}
labels:
{{- include "media-common.labels" . | nindent 4 }}
{{- if .Values.env }}
data:
{{- toYaml .Values.env | nindent 2 }}
{{- end }}
Some files were not shown because too many files have changed in this diff.