Compare commits: jellyfin-2...lidarr-4.0 (102 commits)
Commits (short SHAs):

b629ecc876, 2676dbded2, 7e92803f87, 10cfeb8bd1, 4f99bc67fb, 6d5c992852,
75fd9f4e6d, da9bea90b3, 3b06c431b0, b899548da9, 74845ca08e, 3a40f65b46,
43392e1e7a, d3406d1f39, db24d009cc, b94814d3d7, 3070528d2f, de73201b2b,
ba4e6b978c, 48df925051, 2ecc70f1df, 5c35aa1a1d, 12853f3b9a, 31959e5e37,
a75a6cef77, ac68205d8b, 66d5bd7193, c40bdfeff7, 04478fd52f, ab4fd1b1e0,
aec35fe08f, 16828ba415, 2a3f676426, 1b1898809b, 1dff5670d8, e5b78c7314,
c5b81a263f, e7e4665389, 990ba59dfa, 0ed3ecbb48, 480fa5a7d3, 1f6050759b,
0f37c8776d, 5451ce26ab, 107e53d3b7, f4855955cf, a5694ab9d9, 2508a42660,
cf4c0ba997, 5705371a35, 76c5160e37, a26921bba5, c67e3df333, 97f18a033c,
22017632bc, c991d11bce, 561a0f25bb, e0f64a26f2, 8999baca25, 90daf5bcf1,
1746270044, 55313d0be2, 153620272e, 8a7fe72ea6, ca6493faf3, 576ff487df,
65abab892e, 50ce4d6bde, c69cc6751f, 995ef7ef2b, 6a3b129a4b, 6c8d01add3,
9798bb82cc, 0322acc6fe, 0a221f5297, ab941ae48d, a078da5499, c2df150921,
d28bf3fecf, 7f3bc53d12, 652612e76b, 93addda234, 08f9adbd73, 73956c3eed,
609b2dbe31, ac0202a0c4, 1a67cf9070, 4cbe828448, be82a0fccb, d1fbb47709,
214dd6eaac, 3f50bc7f61, 10348d1c0b, 062db282ed, 457a149637, 1b9cfcfb80,
23a666b18b, 66a943c448, 8c958cbadb, ba63649c59, d149fb6bd7, f5241bde3a
.github/workflows/lint-test.yaml (vendored, 14 changes)

```diff
@@ -1,30 +1,28 @@
 name: Lint and Test Charts

 on: pull_request

 jobs:
   lint-test:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v2

       - name: Fetch history
-        run: git fetch --prune --unshallow
+        run: |
+          git fetch --prune --unshallow;
+          echo ::set-env name=commitmsg::$(git log --format=%B -n 1 ${{ github.event.after }})

       - name: Run chart-testing (lint)
         id: lint
         uses: helm/chart-testing-action@v1.0.0
+        if: "! contains(env.commitmsg, '[skip lint]')"
         with:
           command: lint
           config: ct.yaml

       - name: Create kind cluster
         uses: helm/kind-action@v1.0.0
-        if: steps.lint.outputs.changed == 'true'
+        if: "steps.lint.outputs.changed == 'true' && ! contains(env.commitmsg, '[skip install]')"

       - name: Run chart-testing (install)
         uses: helm/chart-testing-action@v1.0.0
         if: "steps.lint.outputs.changed == 'true' && ! contains(env.commitmsg, '[skip install]')"
         with:
           command: install
           config: ct.yaml
```
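With this change the workflow captures the latest commit message into `commitmsg` and skips steps when it contains `[skip lint]` or `[skip install]`. A minimal usage sketch (the commit messages are illustrative):

```console
# Docs-only change: skip chart linting entirely
git commit -m "docs(bazarr): fix README typo [skip lint]"

# Lint normally, but skip the kind-cluster install test
git commit -m "chore(jackett): bump chart version [skip install]"
```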
.gitignore (vendored, 1 change)

```diff
@@ -1 +1,2 @@
 .env
+.idea
```
charts/bazarr/Chart.yaml

```diff
@@ -2,7 +2,7 @@ apiVersion: v2
 appVersion: v0.9.0.2
 description: Bazarr is a companion application to Sonarr and Radarr. It manages and downloads subtitles based on your requirements
 name: bazarr
-version: 3.0.1
+version: 3.1.0
 keywords:
   - bazarr
   - radarr
```
charts/bazarr/README.md

```diff
@@ -75,6 +75,7 @@ The following tables lists the configurable parameters of the Sentry chart and t
 | `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
 | `persistence.config.size` | Size of persistent volume claim | `1Gi` |
 | `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
+| `persistence.config.subpath` | Select a subpath in the PVC | `nil` |
 | `persistence.config.storageClass` | Type of persistent volume claim | `-` |
 | `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
 | `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
```
charts/bazarr/templates/deployment.yaml

```diff
@@ -64,6 +64,9 @@ spec:
           volumeMounts:
           - mountPath: /config
             name: config
+            {{- if .Values.persistence.config.subPath }}
+            subPath: {{ .Values.persistence.config.subPath }}
+            {{- end }}
           - mountPath: /media
             name: media
             {{- if .Values.persistence.media.subPath }}
```
charts/bazarr/values.yaml

```diff
@@ -78,6 +78,7 @@ persistence:
     ## If you want to reuse an existing claim, you can pass the name of the PVC using
     ## the existingClaim variable
     # existingClaim: your-claim
+    # subPath: some-subpath
     accessMode: ReadWriteOnce
     size: 1Gi
     ## Do not delete the pvc upon helm uninstall
```
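The new `subPath` option mounts a sub-directory of the claim instead of its root. A minimal sketch, assuming an existing PVC named `shared-config` (the claim name is hypothetical):

```console
helm install bazarr k8s-at-home/bazarr \
  --set persistence.config.existingClaim=shared-config \
  --set persistence.config.subPath=bazarr
```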
charts/dashmachine/Chart.yaml (new file, 12 lines)

```yaml
apiVersion: v2
appVersion: v0.5-4
description: DashMachine is another web application bookmark dashboard, with fun features.
icon: https://github.com/rmountjoy92/DashMachine/raw/master/dashmachine/static/images/logo/logo.png
home: https://github.com/rmountjoy92/DashMachine
name: dashmachine
version: 1.0.0
sources:
  - https://github.com/rmountjoy92/DashMachine
maintainers:
  - name: carpenike
    email: ryan@ryanholt.net
```
charts/dashmachine/README.md (new file, 31 lines)

```markdown
dashmachine
===========
DashMachine is another web application bookmark dashboard, with fun features.

## Chart Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| deploymentAnnotations | object | `{}` | |
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"rmountjoy/dashmachine"` | |
| image.tag | string | `"latest"` | |
| ingress.annotations | object | `{}` | |
| ingress.enabled | bool | `false` | |
| ingress.hosts[0] | string | `"chart-example.local"` | |
| ingress.paths[0] | string | `"/"` | |
| ingress.tls | list | `[]` | |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| persistence.accessModes[0] | string | `"ReadWriteOnce"` | |
| persistence.enabled | bool | `false` | |
| persistence.size | string | `"1Gi"` | |
| persistence.storageClassName | string | `""` | |
| podAnnotations | object | `{}` | |
| replicaCount | int | `1` | |
| resources | object | `{}` | |
| service.port | int | `5000` | |
| service.type | string | `"ClusterIP"` | |
| tolerations | list | `[]` | |
```
charts/dashmachine/templates/NOTES.txt

```diff
@@ -1,19 +1,21 @@
 1. Get the application URL by running these commands:
 {{- if .Values.ingress.enabled }}
-{{- range .Values.ingress.hosts }}
-  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range $.Values.ingress.paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }}
+  {{- end }}
 {{- end }}
 {{- else if contains "NodePort" .Values.service.type }}
-  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "radarr.fullname" . }})
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "dashmachine.fullname" . }})
   export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
   echo http://$NODE_IP:$NODE_PORT
 {{- else if contains "LoadBalancer" .Values.service.type }}
      NOTE: It may take a few minutes for the LoadBalancer IP to be available.
-            You can watch the status of by running 'kubectl get svc -w {{ include "radarr.fullname" . }}'
-  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "radarr.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+            You can watch the status of by running 'kubectl get svc -w {{ include "dashmachine.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "dashmachine.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
   echo http://$SERVICE_IP:{{ .Values.service.port }}
 {{- else if contains "ClusterIP" .Values.service.type }}
-  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "radarr.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "dashmachine.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
   echo "Visit http://127.0.0.1:8080 to use your application"
   kubectl port-forward $POD_NAME 8080:80
 {{- end }}
```
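The NOTES template now iterates `ingress.paths` per host instead of a single `ingress.path`. A values fragment exercising it (the hostname and second path are illustrative):

```yaml
ingress:
  enabled: true
  hosts:
    - dash.example.com
  paths:
    - /
    - /login
```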
charts/dashmachine/templates/_helpers.tpl

```diff
@@ -2,7 +2,7 @@
 {{/*
 Expand the name of the chart.
 */}}
-{{- define "lidarr.name" -}}
+{{- define "dashmachine.name" -}}
 {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
 {{- end -}}

@@ -11,7 +11,7 @@ Create a default fully qualified app name.
 We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
 If release name contains chart name it will be used as a full name.
 */}}
-{{- define "lidarr.fullname" -}}
+{{- define "dashmachine.fullname" -}}
 {{- if .Values.fullnameOverride -}}
 {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
 {{- else -}}

@@ -27,6 +27,6 @@ If release name contains chart name it will be used as a full name.
 {{/*
 Create chart name and version as used by the chart label.
 */}}
-{{- define "lidarr.chart" -}}
+{{- define "dashmachine.chart" -}}
 {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
 {{- end -}}
```
charts/dashmachine/templates/deployment.yaml (new file, 78 lines)

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "dashmachine.fullname" . }}
  {{- if .Values.deploymentAnnotations }}
  annotations:
  {{- range $key, $value := .Values.deploymentAnnotations }}
    {{ $key }}: {{ $value | quote }}
  {{- end }}
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "dashmachine.name" . }}
    helm.sh/chart: {{ include "dashmachine.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "dashmachine.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "dashmachine.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
      {{- if .Values.podAnnotations }}
      annotations:
      {{- range $key, $value := .Values.podAnnotations }}
        {{ $key }}: {{ $value | quote }}
      {{- end }}
      {{- end }}
    spec:
      {{- if .Values.dnsConfig }}
      dnsConfig:
        {{- toYaml .Values.dnsConfig | nindent 8 }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 5000
              protocol: TCP
          # livenessProbe:
          #   httpGet:
          #     path: /notifications
          #     port: http
          # readinessProbe:
          #   httpGet:
          #     path: /notifications
          #     port: http
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            - name: config
              mountPath: /dashmachine/dashmachine/user_data
      volumes:
        - name: config
        {{- if .Values.persistence.enabled }}
          persistentVolumeClaim:
            claimName: {{ template "dashmachine.fullname" . }}
        {{- else }}
          emptyDir: {}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
```
charts/dashmachine/templates/ingress.yaml

```diff
@@ -1,22 +1,19 @@
 {{- if .Values.ingress.enabled -}}
-{{- $fullName := include "lidarr.fullname" . -}}
-{{- $ingressPath := .Values.ingress.path -}}
+{{- $fullName := include "dashmachine.fullname" . -}}
+{{- $ingressPaths := .Values.ingress.paths -}}
 apiVersion: extensions/v1beta1
 kind: Ingress
 metadata:
   name: {{ $fullName }}
   labels:
-    app.kubernetes.io/name: {{ include "lidarr.name" . }}
-    helm.sh/chart: {{ include "lidarr.chart" . }}
+    app.kubernetes.io/name: {{ include "dashmachine.name" . }}
+    helm.sh/chart: {{ include "dashmachine.chart" . }}
     app.kubernetes.io/instance: {{ .Release.Name }}
     app.kubernetes.io/managed-by: {{ .Release.Service }}
-    {{- with .Values.ingress.labels -}}
-    {{ toYaml . | nindent 4 }}
-    {{- end -}}
   {{- with .Values.ingress.annotations }}
   annotations:
-    {{ toYaml . | indent 4 }}
+    {{- toYaml . | nindent 4 }}
   {{- end }}
 spec:
   {{- if .Values.ingress.tls }}
   tls:

@@ -33,9 +30,11 @@ spec:
     - host: {{ . | quote }}
       http:
         paths:
-          - path: {{ $ingressPath }}
+          {{- range $ingressPaths }}
+          - path: {{ . }}
             backend:
               serviceName: {{ $fullName }}
               servicePort: http
+          {{- end }}
   {{- end }}
 {{- end }}
```
charts/dashmachine/templates/pvc.yaml (new file, 24 lines)

```yaml
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ template "dashmachine.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "dashmachine.name" . }}
    helm.sh/chart: {{ include "dashmachine.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
  {{- with .Values.persistence.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
  {{- end }}
spec:
  accessModes:
    {{- range .Values.persistence.accessModes }}
    - {{ . | quote }}
    {{- end }}
  resources:
    requests:
      storage: {{ .Values.persistence.size | quote }}
  storageClassName: {{ .Values.persistence.storageClass }}
{{- end -}}
```
charts/dashmachine/templates/service.yaml (new file, 19 lines)

```yaml
apiVersion: v1
kind: Service
metadata:
  name: {{ include "dashmachine.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "dashmachine.name" . }}
    helm.sh/chart: {{ include "dashmachine.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: {{ include "dashmachine.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
```
charts/dashmachine/templates/tests/test-connection.yaml (new file, 18 lines)

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "dashmachine.fullname" . }}-test-connection"
  labels:
    app.kubernetes.io/name: {{ include "dashmachine.name" . }}
    helm.sh/chart: {{ include "dashmachine.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "dashmachine.fullname" . }}:{{ .Values.service.port }}']
  restartPolicy: Never
```
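Once installed, the hook above can be exercised with Helm's test command; a minimal sketch (release name illustrative):

```console
helm test dashmachine
```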
charts/dashmachine/values.yaml (new file, 65 lines)

```yaml
# Default values for dashmachine.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: rmountjoy/dashmachine
  tag: v0.5-4
  pullPolicy: IfNotPresent

nameOverride: ""
fullnameOverride: ""

service:
  type: ClusterIP
  port: 5000

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  paths: ["/"]
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

persistence:
  enabled: false
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ##   set, choosing the default provisioner. (gp2 on AWS, standard on
  ##   GKE, AWS & OpenStack)
  ##
  storageClass: ""
  accessModes:
    - ReadWriteOnce
  size: 1Gi

podAnnotations: {}

deploymentAnnotations: {}
```
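A minimal end-to-end install sketch for the new chart using these defaults (release name and host are illustrative):

```console
helm repo add k8s-at-home https://k8s-at-home.com/charts/
helm install dashmachine k8s-at-home/dashmachine \
  --set ingress.enabled=true \
  --set 'ingress.hosts[0]=dash.example.com'
```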
charts/jackett/Chart.yaml

```diff
@@ -2,7 +2,7 @@ apiVersion: v2
 appVersion: v0.16.1045
 description: API Support for your favorite torrent trackers
 name: jackett
-version: 3.0.1
+version: 3.1.0
 keywords:
   - jackett
   - torrent
```
charts/jackett/README.md (hunk @@ -31,55 +31,58 @@): the parameter table was re-aligned, and rows for `hostNetwork`, `dnsPolicy`, and `dnsConfig` were added. The resulting table:

The following table lists the configurable parameters of the Jackett chart and their default values.

| Parameter | Description | Default |
|-----------|-------------|---------|
| `image.repository` | Image repository | `linuxserver/jackett` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/jackett/tags/). | `v0.12.1132-ls37` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `timezone` | Timezone the Jackett instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | process userID the Jackett instance should run as | `1001` |
| `pgid` | process groupID the Jackett instance should run as | `1001` |
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
| `probes.liveness.periodSeconds` | Specify liveness `periodSeconds` parameter for the deployment | `10` |
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
| `probes.readiness.periodSeconds` | Specify readiness `periodSeconds` parameter for the deployment | `10` |
| `probes.startup.initialDelaySeconds` | Specify startup `initialDelaySeconds` parameter for the deployment | `5` |
| `probes.startup.failureThreshold` | Specify startup `failureThreshold` parameter for the deployment | `30` |
| `probes.startup.periodSeconds` | Specify startup `periodSeconds` parameter for the deployment | `10` |
| `Service.type` | Kubernetes service type for the Jackett GUI | `ClusterIP` |
| `Service.port` | Kubernetes port where the Jackett GUI is exposed | `9117` |
| `Service.annotations` | Service annotations for the Jackett GUI | `{}` |
| `Service.labels` | Custom labels | `{}` |
| `Service.loadBalancerIP` | Loadbalance IP for the Jackett GUI | `{}` |
| `Service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.config.subPath` | Mount a sub directory of the persistent volume if set | `""` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `persistence.torrentblackhole.enabled` | Use persistent volume to store torrent files | `false` |
| `persistence.torrentblackhole.size` | Size of persistent volume claim | `1Gi` |
| `persistence.torrentblackhole.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.torrentblackhole.subPath` | Mount a sub directory of the persistent volume if set | `""` |
| `persistence.torrentblackhole.storageClass` | Type of persistent volume claim | `-` |
| `persistence.torrentblackhole.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.torrentblackhole.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `persistence.extraExistingClaimMounts` | Optionally add multiple existing claims | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |
| `hostNetwork` | Specify whether pods should use host networking | `false` |
| `dnsPolicy` | Set the DNS policy for pods, ex: ClusterFirst, ClusterFirstWithHostNet. See info [here](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) | `ClusterFirst` |
| `dnsConfig` | Specify DNS options for pods, see values.yaml for details, or see [here](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config) | `{}` |

````diff
@@ -96,6 +99,7 @@ helm install --name my-release -f values.yaml k8s-at-home/jackett
 ```

+---

 **NOTE**

 If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...` it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use `existingClaim`.
````
charts/jackett/templates/deployment.yaml

```diff
@@ -34,6 +34,11 @@ spec:
       {{- end }}
     {{- end }}
     spec:
+      hostNetwork: {{ .Values.hostNetwork }}
+      dnsPolicy: {{ .Values.dnsPolicy }}
+      {{- if .Values.dnsConfig }}
+      dnsConfig: {{ toYaml .Values.dnsConfig | nindent 8}}
+      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
```
charts/jackett/values.yaml

```diff
@@ -121,6 +121,23 @@ resources: {}
 #    cpu: 100m
 #    memory: 128Mi

+dnsPolicy: ClusterFirst
+
+dnsConfig: {}
+#  dnsConfig may be used with any dnsPolicy, but is required when dnsPolicy: "None"
+#  To use, remove the braces above, and uncomment/modify the following lines.
+#  See https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
+#  for additional information
+#  nameservers:
+#    - 1.1.1.1
+#  searches:
+#    - ns1.mysearch.domain
+#  options:
+#    - name: ndots
+#      value: "1"
+
+hostNetwork: false
+
 nodeSelector: {}

 tolerations: []
```
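Host networking is typically paired with the matching DNS policy; a hedged values sketch using the new keys (see the Kubernetes DNS docs linked in the README):

```yaml
# Run Jackett in the node's network namespace;
# ClusterFirstWithHostNet keeps cluster DNS resolvable when hostNetwork is true.
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
```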
charts/lidarr/Chart.yaml

```diff
@@ -1,17 +1,21 @@
 apiVersion: v2
-appVersion: 0.7.1.1784
-description: Looks and smells like Sonarr but made for music.
 name: lidarr
-version: 3.0.1
+description: Looks and smells like Sonarr but made for music
+type: application
+version: 4.0.0
+appVersion: 0.7.1.1785-ls18
 keywords:
   - lidarr
   - usenet
   - bittorrent
 home: https://github.com/k8s-at-home/charts/tree/master/charts/lidarr
-icon: https://lidarr.audio/img/logo.png
+icon: https://github.com/lidarr/Lidarr/blob/develop/Logo/512.png?raw=true
 sources:
-  - https://hub.docker.com/r/linuxserver/lidarr/
-  - https://github.com/lidarr/Lidarr/
+  - https://github.com/Lidarr/Lidarr
+  - https://hub.docker.com/r/linuxserver/lidarr
 maintainers:
   - name: billimek
     email: jeff@billimek.com
+  - name: DirtyCajunRice
+    email: nick@cajun.pro
+dependencies:
+  - name: media-common
+    repository: https://k8s-at-home.com/charts/
+    version: ~1.0.0
+    alias: lidarr
```
charts/lidarr/OWNERS

```diff
@@ -1,4 +1,4 @@
 approvers:
-  - billimek
+  - DirtyCajunRice
 reviewers:
-  - billimek
+  - DirtyCajunRice
```
charts/lidarr/README.md

````diff
@@ -1,115 +1,79 @@
-# lidarr music download client
+# Lidarr | Looks and smells like Sonarr but made for music
+Umbrella chart that
+* Uses [media-common](https://github.com/k8s-at-home/charts/tree/master/charts/media-common) as a base
+* Adds docker image information leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/lidarr/)
+* Deploys [Lidarr](https://github.com/lidarr/Lidarr)

-This is a helm chart for [lidarr](https://github.com/lidarr/Lidarr) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/lidarr/)
-
-## TL;DR;
-
-```shell
+## TL;DR
+```console
 $ helm repo add k8s-at-home https://k8s-at-home.com/charts/
 $ helm install k8s-at-home/lidarr
 ```

 ## Installing the Chart

-To install the chart with the release name `my-release`:
-
+To install the chart with the release name `lidarr`:
 ```console
-helm install --name my-release k8s-at-home/lidarr
+helm install lidarr k8s-at-home/lidarr
 ```

+## Upgrading
+Chart versions before 4.0.0 did not use media-common. Upgrading will require you to nest your values.yaml file under
+a top-level `lidarr:` key.
+
-Chart versions 1.0.1 and earlier used separate PVCs for Downloads and Music. This presented an issue where Lidarr would be unable to hard-link files between the /downloads and /music directories when importing media. This is caused because each PVC is exposed to the pod as a separate filesystem. This resulted in Lidarr copying files rather than linking; using additional storage without the user's knowledge.
+Chart versions 1.0.1 and earlier used separate PVCs for Downloads and Music. This presented an issue where Lidarr would
+be unable to hard-link files between the /downloads and /music directories when importing media. This is caused because
+each PVC exposed to the pod as a separate filesystem. It resulted in Lidarr copying files rather than linking;
+using additional storage without the user's knowledge.

-This chart now uses a single PVC for Downloads and Music. This means all of your media (and downloads) must be in, or be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following:
+This chart now uses a single PVC for Downloads and Music. This means all of your media (and downloads) must be in, or
+be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following:

 1. [Uninstall](#uninstalling-the-chart) your current release
 2. On your backing store, organize your media, ie. media/music, media/downloads
 3. If using a pre-existing PVC, create a single new PVC for all of your media
 4. Refer to the [configuration](#configuration) for updates to the chart values
 5. Re-install the chart
-6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Lidarr's `Mass Editor` under the `Library` tab. Simply select all artists in your library, and use the editor to change the `Root Folder` and hit save.
+6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Lidarr's
+   `Mass Editor` under the `Library` tab. Simply select all artists in your library, and use the editor to change the
+   `Root Folder` and hit save.

 ## Uninstalling the Chart

-To uninstall/delete the `my-release` deployment:
-
+To uninstall the `lidarr` deployment:
 ```console
-helm delete my-release --purge
+helm uninstall lidarr
 ```

 The command removes all the Kubernetes components associated with the chart and deletes the release.

 ## Configuration

-The following tables lists the configurable parameters of the Sentry chart and their default values.
-
-| Parameter | Description | Default |
-|----------------------------|-------------------------------------|---------------------------------------------------------|
-| `image.repository` | Image repository | `linuxserver/lidarr` |
-| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/lidarr/tags/). | `0.7.1.1381-ls7` |
-| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
-| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
-| `timezone` | Timezone the lidarr instance should run as, e.g. 'America/New_York' | `UTC` |
-| `puid` | process userID the lidarr instance should run as | `1001` |
-| `pgid` | process groupID the lidarr instance should run as | `1001` |
-| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
-| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
-| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
-| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
-| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
-| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
-| `Service.type` | Kubernetes service type for the lidarr GUI | `ClusterIP` |
-| `Service.port` | Kubernetes port where the lidarr GUI is exposed | `8686` |
-| `Service.annotations` | Service annotations for the lidarr GUI | `{}` |
-| `Service.labels` | Custom labels | `{}` |
-| `Service.loadBalancerIP` | Loadbalance IP for the lidarr GUI | `{}` |
-| `Service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
-| `ingress.enabled` | Enables Ingress | `false` |
-| `ingress.annotations` | Ingress annotations | `{}` |
-| `ingress.labels` | Custom labels | `{}` |
-| `ingress.path` | Ingress path | `/` |
-| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
-| `ingress.tls` | Ingress TLS configuration | `[]` |
-| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
-| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
-| `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
-| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
-| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
-| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
-| `persistence.media.enabled` | Use persistent volume to store configuration data | `true` |
-| `persistence.media.size` | Size of persistent volume claim | `10Gi` |
-| `persistence.media.existingClaim` | Use an existing PVC to persist data | `nil` |
-| `persistence.media.storageClass` | Type of persistent volume claim | `-` |
-| `persistence.media.accessMode` | Persistence access mode | `ReadWriteOnce` |
-| `persistence.media.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
-| `persistence.extraExistingClaimMounts` | Optionally add multiple existing claims | `[]` |
-| `resources` | CPU/Memory resource requests/limits | `{}` |
-| `nodeSelector` | Node labels for pod assignment | `{}` |
-| `tolerations` | Toleration labels for pod assignment | `[]` |
-| `affinity` | Affinity settings for pod assignment | `{}` |
-| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
-| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |
+Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml)
+file. It has several commented out suggested values.

 Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

 ```console
-helm install --name my-release \
-  --set timezone="America/New York" \
+helm install lidarr \
+  --set lidarr.env.TZ="America/New York" \
   k8s-at-home/lidarr
 ```

-Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
-
+Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the
+chart. For example,
 ```console
-helm install --name my-release -f values.yaml stable/lidarr
+helm install lidarr k8s-at-home/lidarr --values values.yaml
 ```

+These values will be nested as it is a dependency, for example
+```yaml
+lidarr:
+  image:
+    tag: ...
+```

 ---
 **NOTE**

-If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...` it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use `existingClaim`.
+If you get
+```console
+Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...
+```
+it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use `existingClaim`.

 ---

-Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/lidarr/values.yaml) file. It has several commented out suggested values.
````
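Since 4.0.0 wraps media-common under the `lidarr` alias, pre-4.0 top-level values must move under a `lidarr:` key. A hedged sketch combining the examples above (exact media-common keys may differ; see its values.yaml):

```yaml
# Chart <= 3.x top-level values such as `timezone` move under the alias:
lidarr:
  env:
    TZ: America/New_York
  image:
    tag: 0.7.1.1785-ls18
```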
Deleted: lidarr config PVC template (29 lines)

```yaml
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "lidarr.fullname" . }}-config
  {{- if .Values.persistence.config.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    helm.sh/chart: {{ include "lidarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.config.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.config.size | quote }}
  {{- if .Values.persistence.config.storageClass }}
  {{- if (eq "-" .Values.persistence.config.storageClass) }}
  storageClassName: ""
  {{- else }}
  storageClassName: "{{ .Values.persistence.config.storageClass }}"
  {{- end }}
  {{- end }}
{{- end -}}
```
Deleted: lidarr deployment template (110 lines)

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "lidarr.fullname" . }}
  {{- if .Values.deploymentAnnotations }}
  annotations:
  {{- range $key, $value := .Values.deploymentAnnotations }}
    {{ $key }}: {{ $value | quote }}
  {{- end }}
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    helm.sh/chart: {{ include "lidarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: 1
  revisionHistoryLimit: 3
  strategy:
    type: {{ .Values.strategyType }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "lidarr.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "lidarr.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
      {{- if .Values.podAnnotations }}
      annotations:
      {{- range $key, $value := .Values.podAnnotations }}
        {{ $key }}: {{ $value | quote }}
      {{- end }}
      {{- end }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8686
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
          readinessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
          env:
            - name: TZ
              value: "{{ .Values.timezone }}"
            - name: PUID
              value: "{{ .Values.puid }}"
            - name: PGID
              value: "{{ .Values.pgid }}"
          volumeMounts:
            - mountPath: /config
              name: config
            - mountPath: /media
              name: media
              {{- if .Values.persistence.media.subPath }}
              subPath: {{ .Values.persistence.media.subPath }}
              {{- end }}
            {{- range .Values.persistence.extraExistingClaimMounts }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
              readOnly: {{ .readOnly }}
            {{- end }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
      volumes:
        - name: config
          {{- if .Values.persistence.config.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "lidarr.fullname" . }}-config{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        - name: media
          {{- if .Values.persistence.media.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.media.existingClaim }}{{ .Values.persistence.media.existingClaim }}{{- else }}{{ template "lidarr.fullname" . }}-media{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        {{- range .Values.persistence.extraExistingClaimMounts }}
        - name: {{ .name }}
          persistentVolumeClaim:
            claimName: {{ .existingClaim }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
```
Deleted: lidarr media PVC template (29 lines)

```yaml
{{- if and .Values.persistence.media.enabled (not .Values.persistence.media.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "lidarr.fullname" . }}-media
  {{- if .Values.persistence.media.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    helm.sh/chart: {{ include "lidarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.media.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.media.size | quote }}
  {{- if .Values.persistence.media.storageClass }}
  {{- if (eq "-" .Values.persistence.media.storageClass) }}
  storageClassName: ""
  {{- else }}
  storageClassName: "{{ .Values.persistence.media.storageClass }}"
  {{- end }}
  {{- end }}
{{- end -}}
```
Deleted: lidarr service template (52 lines)

```yaml
apiVersion: v1
kind: Service
metadata:
  name: {{ template "lidarr.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    helm.sh/chart: {{ include "lidarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
  {{- with .Values.service.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
  {{- end }}
spec:
  {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
  type: ClusterIP
  {{- if .Values.service.clusterIP }}
  clusterIP: {{ .Values.service.clusterIP }}
  {{end}}
  {{- else if eq .Values.service.type "LoadBalancer" }}
  type: {{ .Values.service.type }}
  {{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
  {{- end -}}
  {{- else }}
  type: {{ .Values.service.type }}
  {{- end }}
  {{- if .Values.service.externalIPs }}
  externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
  {{- end }}
  {{- if .Values.service.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
  {{- end }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      protocol: TCP
      targetPort: http
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{.Values.service.nodePort}}
{{ end }}
  selector:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
```
charts/lidarr/values.yaml

```diff
@@ -1,132 +1,10 @@
 # Default values for lidarr.
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.

-image:
-  repository: linuxserver/lidarr
-  tag: 0.7.1.1784-ls18
-  pullPolicy: IfNotPresent
-
-# upgrade strategy type (e.g. Recreate or RollingUpdate)
-strategyType: Recreate
-
-# Probes configuration
-probes:
-  liveness:
-    initialDelaySeconds: 60
-    failureThreshold: 5
-    timeoutSeconds: 10
-  readiness:
-    initialDelaySeconds: 60
-    failureThreshold: 5
-    timeoutSeconds: 10
-
-nameOverride: ""
-fullnameOverride: ""
-
-timezone: UTC
-puid: 1001
-pgid: 1001
-
-service:
-  type: ClusterIP
-  port: 8686
-  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-  ##
-  # nodePort:
-  ## Provide any additional annotations which may be required. This can be used to
-  ## set the LoadBalancer service type to internal only.
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
-  ##
-  annotations: {}
-  labels: {}
-  ## Use loadBalancerIP to request a specific static IP,
-  ## otherwise leave blank
-  ##
-  loadBalancerIP:
-  # loadBalancerSourceRanges: []
-  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
-  # externalTrafficPolicy: Cluster
-
-ingress:
-  enabled: false
-  annotations: {}
-    # kubernetes.io/ingress.class: nginx
-    # kubernetes.io/tls-acme: "true"
-  labels: {}
-  path: /
-  hosts:
-    - chart-example.local
-  tls: []
-  #  - secretName: chart-example-tls
-  #    hosts:
-  #      - chart-example.local
-
-persistence:
-  config:
-    enabled: true
-    ## lidarr configuration data Persistent Volume Storage Class
-    ## If defined, storageClassName: <storageClass>
-    ## If set to "-", storageClassName: "", which disables dynamic provisioning
-    ## If undefined (the default) or set to null, no storageClassName spec is
-    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
-    ##   GKE, AWS & OpenStack)
-    ##
-    # storageClass: "-"
-    ##
-    ## If you want to reuse an existing claim, you can pass the name of the PVC using
-    ## the existingClaim variable
-    # existingClaim: your-claim
-    accessMode: ReadWriteOnce
-    size: 1Gi
-    ## Do not delete the pvc upon helm uninstall
-    skipuninstall: false
-  media:
-    enabled: true
-    ## lidarr media volume configuration
-    ## If defined, storageClassName: <storageClass>
-    ## If set to "-", storageClassName: "", which disables dynamic provisioning
-    ## If undefined (the default) or set to null, no storageClassName spec is
-    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
-    ##   GKE, AWS & OpenStack)
-    ##
-    # storageClass: "-"
-    ##
-    ## If you want to reuse an existing claim, you can pass the name of the PVC using
-    ## the existingClaim variable
-    # existingClaim: your-claim
-    # subPath: some-subpath
-    accessMode: ReadWriteOnce
-    size: 10Gi
-    ## Do not delete the pvc upon helm uninstall
-    skipuninstall: false
-  extraExistingClaimMounts: []
-    # - name: external-mount
-    #   mountPath: /srv/external-mount
-    ## A manually managed Persistent Volume and Claim
-    ## If defined, PVC must be created manually before volume will be bound
-    #   existingClaim:
-    #   readOnly: true
-
-resources: {}
-  # We usually recommend not to specify default resources and to leave this as a conscious
-  # choice for the user. This also increases chances charts run on environments with little
-  # resources, such as Minikube. If you do want to specify resources, uncomment the following
-  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  # limits:
-  #   cpu: 100m
-  #   memory: 128Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 128Mi
-
-nodeSelector: {}
-
-tolerations: []
-
-affinity: {}
-
-podAnnotations: {}
-
-deploymentAnnotations: {}
+lidarr:
+  image:
+    organization: linuxserver
+    repository: lidarr
+    pullPolicy: IfNotPresent
+    tag: 0.7.1.1785-ls18
+  service:
+    port: 8686
```
charts/media-common/Chart.yaml (new file, 11 lines)

```yaml
apiVersion: v2
name: media-common
description: Common dependency chart for media ecosystem containers
type: application
version: 1.0.1
keywords:
  - media-common
home: https://github.com/k8s-at-home/charts/tree/master/charts/media-common
maintainers:
  - name: DirtyCajunRice
    email: nick@cajun.pro
```
charts/media-common/OWNERS (new file, 4 lines)

```yaml
approvers:
  - DirtyCajunRice
reviewers:
  - DirtyCajunRice
```
25
charts/media-common/README.md
Normal file
25
charts/media-common/README.md
Normal file
@@ -0,0 +1,25 @@
# Shared base chart for k8s@home media charts

Many containers have no environmentally configurable settings. This chart allows a single maintainable
base with umbrella charts for container-specific differences. This chart does not have a default
repository or tag, and is not designed to be deployed directly.

## Known Parent Charts

* [k8s-at-home/radarr](https://github.com/k8s-at-home/charts/tree/master/charts/radarr)
* [k8s-at-home/sonarr](https://github.com/k8s-at-home/charts/tree/master/charts/sonarr)
* [k8s-at-home/lidarr](https://github.com/k8s-at-home/charts/tree/master/charts/lidarr)
* [k8s-at-home/tautulli](https://github.com/k8s-at-home/charts/tree/master/charts/tautulli)
* [k8s-at-home/ombi](https://github.com/k8s-at-home/charts/tree/master/charts/ombi)
* [k8s-at-home/organizr](https://github.com/k8s-at-home/charts/tree/master/charts/organizr)

## Configuration

Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml) file.
It has several commented out suggested values.

These values will normally be nested as it is a dependency, for example:
```yaml
radarr:
  <values>
```
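As a concrete illustration of that nesting (hypothetical values, not taken from any chart in this repository), a parent chart that aliases media-common as `radarr` might ship a values.yaml like:
```yaml
# Illustrative only: every key under the alias is passed straight through to media-common
radarr:
  image:
    organization: linuxserver
    repository: radarr
    tag: latest
  env:
    TZ: America/New_York
  service:
    port: 7878
```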
6
charts/media-common/ci/ct-values.yaml
Normal file
@@ -0,0 +1,6 @@
image:
  organization: linuxserver
  repository: radarr
  tag: latest
service:
  port: 7878
@@ -4,16 +4,16 @@
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "lidarr.fullname" . }})
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "media-common.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
  You can watch the status of it by running 'kubectl get svc -w {{ include "lidarr.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "lidarr.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  You can watch the status of it by running 'kubectl get svc -w {{ include "media-common.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "media-common.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "lidarr.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "media-common.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
{{- end }}
@@ -2,7 +2,7 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "radarr.name" -}}
{{- define "media-common.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -11,7 +11,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "radarr.fullname" -}}
{{- define "media-common.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
@@ -27,6 +27,26 @@ If release name contains chart name it will be used as a full name.
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "radarr.chart" -}}
{{- define "media-common.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "media-common.labels" -}}
helm.sh/chart: {{ include "media-common.chart" . }}
{{ include "media-common.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "media-common.selectorLabels" -}}
app.kubernetes.io/name: {{ include "media-common.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
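For orientation, these helpers render to plain label maps. Assuming a release named `radarr` of this chart at version 1.0.1 (and no appVersion set, so the version label is skipped per the guard above), `media-common.labels` would produce roughly:
```yaml
# Illustrative render only; exact values depend on release name and chart version
helm.sh/chart: media-common-1.0.1
app.kubernetes.io/name: media-common
app.kubernetes.io/instance: radarr
app.kubernetes.io/managed-by: Helm
```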
10
charts/media-common/templates/configmap.yaml
Normal file
@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "media-common.fullname" . }}
  labels:
    {{- include "media-common.labels" . | nindent 4 }}
{{- if .Values.env }}
data:
  {{- toYaml .Values.env | nindent 2 }}
{{- end }}
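The whole `env` map thus becomes ConfigMap data, which the workload templates below consume via `envFrom`. With the default values (`env.TZ: UTC`) this renders roughly as follows (the resource name is illustrative; the real one comes from `media-common.fullname`):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: radarr-media-common   # assumed release name "radarr"
data:
  TZ: UTC
```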
105
charts/media-common/templates/deployment.yaml
Normal file
@@ -0,0 +1,105 @@
{{- if eq .Values.persistence.type "deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "media-common.fullname" . }}
  labels:
    {{- include "media-common.labels" . | nindent 4 }}
spec:
  replicas: 1
  selector:
    matchLabels:
      {{- include "media-common.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "media-common.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.podSecurityContext }}
      securityContext:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: {{ template "media-common.fullname" . }}
          {{- with .Values.securityContext }}
          securityContext:
            {{- toYaml . | nindent 8 }}
          {{- end }}
          image: "{{ .Values.image.organization }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          envFrom:
            - configMapRef:
                name: {{ template "media-common.fullname" . }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
          readinessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
          volumeMounts:
            - mountPath: {{ .Values.configPath }}
              name: config
              {{- if .Values.persistence.config.subPath }}
              subPath: {{ .Values.persistence.config.subPath }}
              {{- end }}
            {{- if .Values.persistence.media.enabled }}
            - mountPath: /media
              name: media
              {{- if .Values.persistence.media.subPath }}
              subPath: {{ .Values.persistence.media.subPath }}
              {{- end }}
            {{- end }}
            {{- if .Values.additionalVolumeMounts }}
            {{- toYaml .Values.additionalVolumeMounts | nindent 12 }}
            {{- end }}
          {{- with .Values.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
      volumes:
        - name: config
          {{- if .Values.persistence.config.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "media-common.fullname" . }}{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        {{- if .Values.persistence.media.enabled }}
        - name: media
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.media.existingClaim }}{{ .Values.persistence.media.existingClaim }}{{- else }}{{ template "media-common.fullname" . }}-media{{- end }}
        {{- end }}
        {{- if .Values.additionalVolumes }}
        {{- toYaml .Values.additionalVolumes | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
{{- end }}
81
charts/media-common/templates/ingress.yaml
Normal file
@@ -0,0 +1,81 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "media-common.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "media-common.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ . }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
          {{- end }}
    {{- end }}
{{- range $index, $ingress := .Values.ingress.extraIngresses }}
---
{{- if semverCompare ">=1.14-0" $.Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}-{{ $ingress.nameSuffix | default $index }}
  labels:
    {{- include "media-common.labels" $ | nindent 4 }}
  {{- with $ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if $ingress.tls }}
  tls:
    {{- range $ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range $ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ . }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
          {{- end }}
    {{- end }}
{{- end }}
{{- end }}
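Each entry in `ingress.extraIngresses` therefore becomes its own Ingress named `<fullname>-<nameSuffix|index>` pointing at the same Service. A hypothetical values snippet exercising it (host names are placeholders):
```yaml
ingress:
  enabled: true
  hosts:
    - host: app.example.com
      paths:
        - /
  extraIngresses:
    - nameSuffix: "api"   # renders a second Ingress named <fullname>-api
      hosts:
        - host: app.example.com
          paths:
            - /api
```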
44
charts/media-common/templates/pvc.yaml
Normal file
@@ -0,0 +1,44 @@
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) -}}
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "media-common.fullname" . }}
  {{- if .Values.persistence.config.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
  {{- end }}
  labels:
    {{- include "media-common.labels" . | nindent 4 }}
spec:
  accessModes:
    - {{ .Values.persistence.config.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.config.size | quote }}
  {{- if .Values.persistence.config.storageClass }}
  storageClassName: {{ if (eq "-" .Values.persistence.config.storageClass) }}""{{- else }}{{ .Values.persistence.config.storageClass | quote }}{{- end }}
  {{- end }}
{{- end -}}
{{- if and .Values.persistence.media.enabled (not .Values.persistence.media.existingClaim) }}
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "media-common.fullname" . }}-media
  {{- if .Values.persistence.media.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
  {{- end }}
  labels:
    {{- include "media-common.labels" . | nindent 4 }}
spec:
  accessModes:
    - {{ .Values.persistence.media.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.media.size | quote }}
  {{- if .Values.persistence.media.storageClass }}
  storageClassName: {{ if (eq "-" .Values.persistence.media.storageClass) }}""{{- else }}{{ .Values.persistence.media.storageClass | quote }}{{- end }}
  {{- end }}
{{- end -}}
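The `storageClass` handling follows the usual three-state convention documented in values.yaml; a sketch of the possibilities (the named class is an assumption about your cluster):
```yaml
persistence:
  config:
    enabled: true
    storageClass: "-"        # renders storageClassName: "" (disables dynamic provisioning)
    # storageClass: "fast"   # would render storageClassName: "fast"
    # omit storageClass entirely to fall back to the cluster default provisioner
    accessMode: ReadWriteOnce
    size: 1Gi
```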
28
charts/media-common/templates/service.yaml
Normal file
@@ -0,0 +1,28 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "media-common.fullname" . }}
  labels:
    {{- include "media-common.labels" . | nindent 4 }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
  {{- with .Values.service.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      protocol: TCP
      targetPort: http
      {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{ .Values.service.nodePort }}
      {{- end }}
  {{- with .Values.service.additionalSpec }}
  {{- toYaml . | nindent 2 }}
  {{- end }}
  selector:
    {{- include "media-common.selectorLabels" . | nindent 4 }}
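Note the `nodePort` field is only emitted when the service type is `NodePort` and a port is actually set; a hypothetical values snippet:
```yaml
service:
  type: NodePort
  port: 7878
  nodePort: 30878   # pin the node port; omit to let Kubernetes allocate one
```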
106
charts/media-common/templates/statefulset.yaml
Normal file
@@ -0,0 +1,106 @@
{{- if eq .Values.persistence.type "statefulset" }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ template "media-common.fullname" . }}
  labels:
    {{- include "media-common.labels" . | nindent 4 }}
spec:
  replicas: 1
  selector:
    matchLabels:
      {{- include "media-common.selectorLabels" . | nindent 6 }}
  serviceName: {{ include "media-common.fullname" . }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "media-common.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.podSecurityContext }}
      securityContext:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      containers:
        - name: {{ template "media-common.fullname" . }}
          {{- with .Values.securityContext }}
          securityContext:
            {{- toYaml . | nindent 8 }}
          {{- end }}
          image: "{{ .Values.image.organization }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          envFrom:
            - configMapRef:
                name: {{ template "media-common.fullname" . }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
          readinessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
          volumeMounts:
            - mountPath: {{ .Values.configPath }}
              name: config
              {{- if .Values.persistence.config.subPath }}
              subPath: {{ .Values.persistence.config.subPath }}
              {{- end }}
            {{- if .Values.persistence.media.enabled }}
            - mountPath: /media
              name: media
              {{- if .Values.persistence.media.subPath }}
              subPath: {{ .Values.persistence.media.subPath }}
              {{- end }}
            {{- end }}
            {{- if .Values.additionalVolumeMounts }}
            {{- toYaml .Values.additionalVolumeMounts | nindent 12 }}
            {{- end }}
          {{- with .Values.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
      volumes:
        - name: config
          {{- if .Values.persistence.config.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "media-common.fullname" . }}{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        {{- if .Values.persistence.media.enabled }}
        - name: media
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.media.existingClaim }}{{ .Values.persistence.media.existingClaim }}{{- else }}{{ template "media-common.fullname" . }}-media{{- end }}
        {{- end }}
        {{- if .Values.additionalVolumes }}
        {{- toYaml .Values.additionalVolumes | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
{{- end }}
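The Deployment and StatefulSet templates are intentionally near-identical; `persistence.type` in the values file below picks which one renders, defaulting to `statefulset`:
```yaml
persistence:
  type: deployment   # switch from the default StatefulSet to a Deployment
```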
149
charts/media-common/values.yaml
Normal file
@@ -0,0 +1,149 @@
# Default values for media-common.

image:
  organization: ""
  repository: ""
  pullPolicy: IfNotPresent
  tag: ""

# Probes configuration
probes:
  liveness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10
  readiness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

configPath: /config

env:
  TZ: UTC

service:
  type: ClusterIP
  port: ""
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  additionalSpec: {}

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  labels: {}
  hosts:
    - host: chart-example.local
      paths:
        - /
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local
  extraIngresses:
  #  - enabled: false
  #    nameSuffix: "api"
  #    annotations: {}
  #    # kubernetes.io/ingress.class: nginx
  #    # kubernetes.io/tls-acme: "true"
  #    labels: {}
  #    hosts:
  #      - host: chart-example.local
  #        paths:
  #          - /api
  #    tls: []
  #    # - secretName: chart-example-tls
  #    #   hosts:
  #    #     - chart-example.local

persistence:
  # type: options are statefulset or deployment
  type: statefulset
  config:
    enabled: true
    ## media-common configuration data Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ## set, choosing the default provisioner. (gp2 on AWS, standard on
    ## GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 1Gi
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  media:
    enabled: false
    ## media-common media volume configuration
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ## set, choosing the default provisioner. (gp2 on AWS, standard on
    ## GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 10Gi
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false

additionalVolumes: []

additionalVolumeMounts: []

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
#   cpu: 100m
#   memory: 128Mi
# requests:
#   cpu: 100m
#   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

podAnnotations: {}
@@ -2,7 +2,7 @@ apiVersion: v2
appVersion: v2.26.0
description: Usenet meta search
name: nzbhydra2
version: 3.0.1
version: 3.0.2
keywords:
  - nzbhydra2
  - usenet
@@ -47,12 +47,12 @@ The following tables lists the configurable parameters of the Sentry chart and t
| `probes.startup.initialDelaySeconds` | Specify startup `initialDelaySeconds` parameter for the deployment | `5` |
| `probes.startup.failureThreshold` | Specify startup `failureThreshold` parameter for the deployment | `30` |
| `probes.startup.periodSeconds` | Specify startup `periodSeconds` parameter for the deployment | `10` |
| `Service.type` | Kubernetes service type for the nzbhydra2 GUI | `ClusterIP` |
| `Service.port` | Kubernetes port where the nzbhydra2 GUI is exposed | `5076` |
| `Service.annotations` | Service annotations for the nzbhydra2 GUI | `{}` |
| `Service.labels` | Custom labels | `{}` |
| `Service.loadBalancerIP` | Loadbalancer IP for the nzbhydra2 GUI | `{}` |
| `Service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `service.type` | Kubernetes service type for the nzbhydra2 GUI | `ClusterIP` |
| `service.port` | Kubernetes port where the nzbhydra2 GUI is exposed | `5076` |
| `service.annotations` | Service annotations for the nzbhydra2 GUI | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.loadBalancerIP` | Loadbalancer IP for the nzbhydra2 GUI | `{}` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
@@ -1,16 +1,21 @@
apiVersion: v2
appVersion: v4.0.464
description: Want a Movie or TV Show on Plex or Emby? Use Ombi!
name: ombi
version: 3.0.1
description: Want a Movie or TV Show on Plex or Emby? Use Ombi!
type: application
version: 4.0.0
appVersion: 4.0.471
keywords:
  - ombi
  - plex
home: https://github.com/k8s-at-home/charts/tree/master/charts/ombi
icon: https://ombi.io/img/logo-orange-small.png
icon: https://github.com/tidusjar/Ombi/blob/feature/v4/src/Ombi/wwwroot/images/ms-icon-310x310.png?raw=true
sources:
  - https://hub.docker.com/r/linuxserver/ombi/
  - https://ombi.io/
  - https://github.com/tidusjar/Ombi
  - https://hub.docker.com/r/linuxserver/ombi
maintainers:
  - name: billimek
    email: jeff@billimek.com
  - name: DirtyCajunRice
    email: nick@cajun.pro
dependencies:
  - name: media-common
    repository: https://k8s-at-home.com/charts/
    version: ~1.0.0
    alias: ombi
@@ -1,4 +1,4 @@
approvers:
  - billimek
  - DirtyCajunRice
reviewers:
  - billimek
  - DirtyCajunRice
@@ -1,97 +1,79 @@
# Ombi
# Ombi | Want a Movie or TV Show on Plex or Emby? Use Ombi!
Umbrella chart that
* Uses [media-common](https://github.com/k8s-at-home/charts/tree/master/charts/media-common) as a base
* Adds docker image information leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/ombi/)
* Deploys [Ombi](https://github.com/tidusjar/Ombi)

This is a helm chart for [Ombi](https://ombi.io/) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/ombi/)

## TL;DR;

```shell
## TL;DR
```console
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm install k8s-at-home/ombi
```

## Installing the Chart

To install the chart with the release name `my-release`:

To install the chart with the release name `ombi`:
```console
helm install --name my-release k8s-at-home/ombi
helm install ombi k8s-at-home/ombi
```

## Upgrading
Chart versions before 4.0.0 did not use media-common. Upgrading will require you to nest your values.yaml file under
a top-level `ombi:` key.

Chart versions 1.0.1 and earlier used separate PVCs for Downloads and Music. This presented an issue where Ombi would
be unable to hard-link files between the /downloads and /music directories when importing media. This is caused because
each PVC is exposed to the pod as a separate filesystem. It resulted in Ombi copying files rather than linking; using
additional storage without the user's knowledge.

This chart now uses a single PVC for Downloads and Music. This means all of your media (and downloads) must be in, or
be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following:

1. [Uninstall](#uninstalling-the-chart) your current release
2. On your backing store, organize your media, ie. media/music, media/downloads
3. If using a pre-existing PVC, create a single new PVC for all of your media
4. Refer to the [configuration](#configuration) for updates to the chart values
5. Re-install the chart
6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Ombi's
   `Mass Editor` under the `Library` tab. Simply select all artists in your library, and use the editor to change the
   `Root Folder` and hit save.

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

To uninstall the `ombi` deployment:
```console
helm delete my-release --purge
helm uninstall ombi
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the Ombi chart and their default values.

| Parameter | Description | Default |
| --- | --- | --- |
| `image.repository` | Image repository | `linuxserver/ombi` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/ombi/tags/). | `3.0.4914-ls72` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `timezone` | Timezone the Ombi instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | process userID the Ombi instance should run as | `1001` |
| `pgid` | process groupID the Ombi instance should run as | `1001` |
| `baseUrl` | adjust baseUrl if behind a reverse proxy | null |
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
| `Service.type` | Kubernetes service type for the Ombi GUI | `ClusterIP` |
| `Service.port` | Kubernetes port where the Ombi GUI is exposed | `3579` |
| `Service.annotations` | Service annotations for the Ombi GUI | `{}` |
| `Service.labels` | Custom labels | `{}` |
| `Service.loadBalancerIP` | Loadbalancer IP for the Ombi GUI | `{}` |
| `Service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.config.subPath` | Mount a sub directory of the persistent volume if set | `""` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |
Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml)
file. It has several commented out suggested values.

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install --name my-release \
  --set timezone="America/New_York" \
helm install ombi \
  --set ombi.env.TZ="America/New_York" \
  k8s-at-home/ombi
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart.
For example,
```console
helm install --name my-release -f values.yaml k8s-at-home/ombi
helm install ombi k8s-at-home/ombi --values values.yaml
```

These values will be nested as it is a dependency, for example
```yaml
ombi:
  image:
    tag: ...
```

---
**NOTE**

If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...` it may be because you uninstalled the chart with `skipuninstall` enabled; you need to manually delete the pvc or use `existingClaim`.
If you get
```console
Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...
```
it may be because you uninstalled the chart with `skipuninstall` enabled; you need to manually delete the pvc or use `existingClaim`.

---

Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/ombi/values.yaml) file. It has several commented out suggested values.
@@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "ombi.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
  You can watch the status of it by running 'kubectl get svc -w {{ include "ombi.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "ombi.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "ombi.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:3579 to use your application"
  kubectl port-forward $POD_NAME 3579:80
{{- end }}
@@ -1,29 +0,0 @@
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "ombi.fullname" . }}-config
  {{- if .Values.persistence.config.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "ombi.name" . }}
    helm.sh/chart: {{ include "ombi.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.config.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.config.size | quote }}
  {{- if .Values.persistence.config.storageClass }}
  {{- if (eq "-" .Values.persistence.config.storageClass) }}
  storageClassName: ""
  {{- else }}
  storageClassName: "{{ .Values.persistence.config.storageClass }}"
  {{- end }}
  {{- end }}
{{- end -}}
@@ -1,95 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "ombi.fullname" . }}
  {{- if .Values.deploymentAnnotations }}
  annotations:
    {{- range $key, $value := .Values.deploymentAnnotations }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "ombi.name" . }}
    helm.sh/chart: {{ include "ombi.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: 1
  revisionHistoryLimit: 3
  strategy:
    type: {{ .Values.strategyType }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "ombi.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "ombi.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
      {{- if .Values.podAnnotations }}
      annotations:
        {{- range $key, $value := .Values.podAnnotations }}
        {{ $key }}: {{ $value | quote }}
        {{- end }}
      {{- end }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 3579
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
          readinessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
          env:
            - name: TZ
              value: "{{ .Values.timezone }}"
            - name: PUID
              value: "{{ .Values.puid }}"
            - name: PGID
              value: "{{ .Values.pgid }}"
            {{- if .Values.baseUrl }}
            - name: BASE_URL
              value: "{{ .Values.baseUrl }}"
            {{ end }}
          volumeMounts:
            - mountPath: /config
              name: config
              {{- if .Values.persistence.config.subPath }}
              subPath: "{{ .Values.persistence.config.subPath }}"
              {{- end }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
      volumes:
        - name: config
          {{- if .Values.persistence.config.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "ombi.fullname" . }}-config{{- end }}
          {{- else }}
          emptyDir: {}
          {{ end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
@@ -1,53 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "ombi.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "ombi.name" . }}
    helm.sh/chart: {{ include "ombi.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
  {{- with .Values.service.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
  {{- end }}
spec:
  {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
  type: ClusterIP
  {{- if .Values.service.clusterIP }}
  clusterIP: {{ .Values.service.clusterIP }}
  {{end}}
  {{- else if eq .Values.service.type "LoadBalancer" }}
  type: {{ .Values.service.type }}
  {{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
  {{- end -}}
  {{- else }}
  type: {{ .Values.service.type }}
  {{- end }}
  {{- if .Values.service.externalIPs }}
  externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
  {{- end }}
  {{- if .Values.service.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
  {{- end }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      protocol: TCP
      targetPort: http
      {{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{ .Values.service.nodePort }}
      {{ end }}
  selector:
    app.kubernetes.io/name: {{ include "ombi.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
@@ -1,117 +1,10 @@
# Default values for Ombi.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# Default values for ombi.

image:
  repository: linuxserver/ombi
  tag: v4.0.464-ls10
  pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate

# Probes configuration
probes:
  liveness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10
  readiness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10

nameOverride: ""
fullnameOverride: ""

timezone: UTC
puid: 1001
pgid: 1001

# Subfolder can optionally be defined as an env variable for reverse proxies. Keep
# in mind that once this value is defined, the gui setting for base url no longer
# works. To use the gui setting, remove this env variable.
#
# baseUrl: /ombi

service:
  type: ClusterIP
  port: 3579
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  labels: {}
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

persistence:
  config:
    enabled: true
    ## Ombi configuration data Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ## set, choosing the default provisioner. (gp2 on AWS, standard on
    ## GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi

    ## If subPath is set mount a sub folder of a volume instead of the root of the volume.
    ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
    ##
    subPath: ""
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false

resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
#   cpu: 100m
#   memory: 128Mi
# requests:
#   cpu: 100m
#   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

podAnnotations: {}

deploymentAnnotations: {}

ombi:
  image:
    organization: linuxserver
    repository: ombi
    pullPolicy: IfNotPresent
    tag: v4.0.471-ls10
  service:
    port: 3579
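Put differently, a pre-4.0.0 ombi values file migrates by nesting everything under the `ombi` alias and moving the linuxserver environment settings into `env`; a hypothetical before/after:
```yaml
# Before (chart < 4.0.0):
# timezone: America/New_York
# image:
#   tag: v4.0.464-ls10

# After (chart >= 4.0.0):
ombi:
  env:
    TZ: America/New_York
  image:
    tag: v4.0.471-ls10
```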
21
charts/organizr/Chart.yaml
Normal file
@@ -0,0 +1,21 @@
apiVersion: v2
name: organizr
description: HTPC/Homelab Services Organizer - Written in PHP
type: application
version: 1.0.0
appVersion: latest
keywords:
  - organizr
home: https://github.com/k8s-at-home/charts/tree/master/charts/organizr
icon: https://github.com/causefx/Organizr/blob/v2-master/plugins/images/organizr/logo.png?raw=true
sources:
  - https://github.com/causefx/Organizr
  - https://hub.docker.com/r/organizr/organizr
maintainers:
  - name: DirtyCajunRice
    email: nick@cajun.pro
dependencies:
  - name: media-common
    repository: https://k8s-at-home.com/charts/
    version: ~1.0.0
    alias: organizr
4
charts/organizr/OWNERS
Normal file
@@ -0,0 +1,4 @@
approvers:
  - DirtyCajunRice
reviewers:
  - DirtyCajunRice
58
charts/organizr/README.md
Normal file
@@ -0,0 +1,58 @@
# Organizr | HTPC/Homelab Services Organizer - Written in PHP
Umbrella chart that
* Uses [media-common](https://github.com/k8s-at-home/charts/tree/master/charts/media-common) as a base
* Adds docker image information leveraging the [official image](https://hub.docker.com/r/organizr/organizr/)
* Deploys [Organizr](https://github.com/causefx/Organizr)

## TL;DR
```console
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm install k8s-at-home/organizr
```

## Installing the Chart
To install the chart with the release name `organizr`:
```console
helm install organizr k8s-at-home/organizr
```

## Uninstalling the Chart
To uninstall the `organizr` deployment:
```console
helm uninstall organizr
```
The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration
Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml)
file. It has several commented out suggested values.

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install organizr \
  --set organizr.env.TZ="America/New_York" \
  k8s-at-home/organizr
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart.
For example,
```console
helm install organizr k8s-at-home/organizr --values values.yaml
```

These values will be nested as it is a dependency, for example
```yaml
organizr:
  image:
    tag: ...
```

---
**NOTE**

If you get
```console
Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...
```
it may be because you uninstalled the chart with `skipuninstall` enabled; you need to manually delete the pvc or use `existingClaim`.

---
10
charts/organizr/values.yaml
Normal file
@@ -0,0 +1,10 @@
# Default values for organizr.

organizr:
  image:
    organization: organizr
    repository: organizr
    pullPolicy: IfNotPresent
    tag: latest
  service:
    port: 80
@@ -2,7 +2,7 @@ apiVersion: v2
appVersion: 1.20.1.3252
description: Plex Media Server
name: plex
version: 2.0.1
version: 2.0.2
keywords:
  - plex
home: https://plex.tv/
@@ -188,7 +188,7 @@ spec:
              name: {{ .Values.certificate.pkcsMangler.pfxPassword.secretName }}
              key: {{ .Values.certificate.pkcsMangler.pfxPassword.passwordKey }}
        - name: "PKCSMANGLER_CUSTOMCERTDOMAIN"
          value: "customCertificateDomain={{.Values.certificate.pkcsMangler.plexPreferences.customCertificateDomain}}"
          value: "customCertificateDomain={{.Values.certificate.pkcsMangler.setPlexPreferences.customCertificateDomain}}"
        {{- end }}
        {{- end }}
        readinessProbe:
@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
@@ -1,17 +1,21 @@
apiVersion: v2
appVersion: 3.0.0.3543
description: Radarr is a movie downloading client
name: radarr
version: 5.0.1
description: A fork of Sonarr to work with movies à la Couchpotato
type: application
version: 6.0.0
appVersion: 3.0.0.3591
keywords:
  - radarr
  - usenet
  - bittorrent
home: https://github.com/k8s-at-home/charts/tree/master/charts/radarr
icon: https://avatars3.githubusercontent.com/u/25025331?s=400&v=4
icon: https://github.com/Radarr/Radarr/blob/aphrodite/Logo/512.png?raw=true
sources:
  - https://hub.docker.com/r/linuxserver/radarr/
  - https://github.com/Radarr/Radarr/
  - https://github.com/Radarr/Radarr
  - https://hub.docker.com/r/linuxserver/radarr
maintainers:
  - name: billimek
    email: jeff@billimek.com
  - name: DirtyCajunRice
    email: nick@cajun.pro
dependencies:
  - name: media-common
    repository: https://k8s-at-home.com/charts/
    version: ~1.0.0
    alias: radarr
@@ -1,4 +1,4 @@
approvers:
  - billimek
  - DirtyCajunRice
reviewers:
  - billimek
  - DirtyCajunRice
@@ -1,131 +1,79 @@
|
||||
# radarr movie download client
|
||||
# Radarr | A fork of Sonarr to work with movies à la Couchpotato
|
||||
Umbrella chart that
|
||||
* Uses [media-common](https://github.com/k8s-at-home/charts/tree/master/charts/media-common) as a base
|
||||
* Adds docker image information leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/radarr/)
|
||||
* Deploys [Radarr](https://github.com/Radarr/Radarr)
|
||||
|
||||
This is a helm chart for [radarr](https://github.com/Radarr/Radarr/) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/radarr/)
|
||||
|
||||
## TL;DR;
|
||||
|
||||
```shell
|
||||
## TL;DR
|
||||
```console
|
||||
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
|
||||
$ helm install k8s-at-home/radarr
|
||||
```
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
To install the chart with the release name `radarr`:
|
||||
```console
|
||||
helm install --name my-release k8s-at-home/radarr
|
||||
helm install radarr k8s-at-home/radarr
|
||||
```
|
||||
|
||||
## Upgrading
|
||||
Chart versions before 6.0.0 did not use media-common. Upgrading will require you to nest your values.yaml file under
|
||||
a top-level `radarr:` key.
|
||||
|
||||
Chart versions 3.2.0 and earlier used separate PVCs for Downloads and Movies. This presented an issue where Radarr would be unable to hard-link files between the /downloads and /movies directories when importing media. This is caused because each PVC is exposed to the pod as a separate filesystem. This resulted in Radarr copying files rather than linking; using additional storage without the user's knowledge.
|
||||
Chart versions 3.2.0 and earlier used separate PVCs for Downloads and Movies. This presented an issue where Radarr would
|
||||
be unable to hard-link files between the /downloads and /movies directories when importing media. This is caused because
|
||||
each PVC exposed to the pod as a separate filesystem. It resulted in Radarr copying files rather than linking;
|
||||
using additional storage without the user's knowledge.
|
||||
|
||||
This chart now uses a single PVC for Downloads and Movies. This means all of your media (and downloads) must be in, or be subdirectories of, a single directory. If upgrading from v1 of the chart, do the following:
|
||||
This chart now uses a single PVC for Downloads and Movies. This means all of your media (and downloads) must be in, or
|
||||
be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following:
|
||||

1. [Uninstall](#uninstalling-the-chart) your current release
2. On your backing store, organize your media, e.g. media/movies, media/downloads
3. If using a pre-existing PVC, create a single new PVC for all of your media (see the sketch after this list)
4. Refer to the [configuration](#configuration) for updates to the chart values
5. Re-install the chart
6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Radarr's `Movie Editor` under the `Movies` tab. Simply select all movies in your library, and use the editor to change the `Root Folder` and hit save.
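
As referenced in step 3, a single pre-created claim can back the whole media mount (a hedged sketch; `media-ssd` is a hypothetical PVC name, and this assumes the `persistence.media.existingClaim` key from the table below carries over to the nested layout):

```yaml
radarr:
  persistence:
    media:
      # hypothetical pre-created PVC whose root contains media/movies and media/downloads
      existingClaim: media-ssd
```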

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

To uninstall the `radarr` deployment:
```console
helm delete my-release --purge
helm uninstall radarr
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the Radarr chart and their default values.

| Parameter | Description | Default |
| --------- | ----------- | ------- |
| `image.repository` | Image repository | `linuxserver/radarr` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/radarr/tags/). | `v0.2.0.1480-ls58` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `timezone` | Timezone the instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | process userID the instance should run as | `1001` |
| `pgid` | process groupID the instance should run as | `1001` |
| `exportarr.enabled` | Enable Prometheus monitoring with [Exportarr](https://github.com/onedr0p/exportarr) | `false` |
| `exportarr.image.repository` | Exportarr image repository | `onedr0p/exportarr` |
| `exportarr.image.tag` | Exportarr image tag | `v0.3.0` |
| `exportarr.image.pullPolicy` | Exportarr image pullPolicy | `IfNotPresent` |
| `exportarr.port` | Prometheus scrape port | `9708` |
| `exportarr.url` | Radarr's URL | `http://radarr.default.svc.cluster.local:7878` |
| `exportarr.apikey` | Radarr's API Key | |
| `exportarr.serviceMonitor.enabled` | Enable Prometheus Operator ServiceMonitor monitoring | `false` |
| `exportarr.serviceMonitor.namespace` | Define namespace where to deploy the ServiceMonitor resource | (namespace where you are deploying) |
| `exportarr.serviceMonitor.path` | Prometheus scrape path | `/metrics` |
| `exportarr.serviceMonitor.interval` | Prometheus scrape interval | `4m` |
| `exportarr.serviceMonitor.scrapeTimeout` | Prometheus scrape timeout | `90s` |
| `exportarr.serviceMonitor.additionalLabels` | Add custom labels to ServiceMonitor | `{}` |
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
| `service.type` | Kubernetes service type for the GUI | `ClusterIP` |
| `service.port` | Kubernetes port where the GUI is exposed | `7878` |
| `service.annotations` | Service annotations for the GUI | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.loadBalancerIP` | Loadbalancer IP for the GUI | `{}` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.subPath` | Mount a sub directory if set | `nil` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `persistence.media.enabled` | Use persistent volume to store media | `true` |
| `persistence.media.size` | Size of persistent volume claim | `10Gi` |
| `persistence.media.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.media.storageClass` | Type of persistent volume claim | `-` |
| `persistence.media.subPath` | Mount a sub directory if set | `nil` |
| `persistence.media.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.media.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `persistence.extraExistingClaimMounts` | Optionally add multiple existing claims | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |

Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml) file. It has several commented out suggested values.

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install --name my-release \
  --set timezone="America/New_York" \
helm install radarr \
  --set radarr.env.TZ="America/New_York" \
  k8s-at-home/radarr
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install --name my-release -f values.yaml stable/radarr
helm install radarr k8s-at-home/radarr --values values.yaml
```

These values will be nested as it is a dependency, for example

```yaml
radarr:
  image:
    tag: ...
```

---

**NOTE**

If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...` it may be because you uninstalled the chart with `skipuninstall` enabled; you need to manually delete the PVC or use `existingClaim`.
If you get

```console
Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...
```

it may be because you uninstalled the chart with `skipuninstall` enabled; you need to manually delete the PVC or use `existingClaim`.
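
In that case the leftover claim can be removed by hand before re-installing (a sketch; the `radarr-config` name assumes a release named `radarr`, per the `-config` suffix the chart's PVC template appends to the full name):

```console
$ kubectl get pvc
$ kubectl delete pvc radarr-config
```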

---

Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/radarr/values.yaml) file. It has several commented out suggested values.

@@ -1,29 +0,0 @@
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "radarr.fullname" . }}-config
{{- if .Values.persistence.config.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
{{- end }}
  labels:
    app.kubernetes.io/name: {{ include "radarr.name" . }}
    helm.sh/chart: {{ include "radarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.config.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.config.size | quote }}
{{- if .Values.persistence.config.storageClass }}
{{- if (eq "-" .Values.persistence.config.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.config.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}

@@ -1,149 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "radarr.fullname" . }}
{{- if .Values.deploymentAnnotations }}
  annotations:
  {{- range $key, $value := .Values.deploymentAnnotations }}
    {{ $key }}: {{ $value | quote }}
  {{- end }}
{{- end }}
  labels:
    app.kubernetes.io/name: {{ include "radarr.name" . }}
    helm.sh/chart: {{ include "radarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: 1
  revisionHistoryLimit: 3
  strategy:
    type: {{ .Values.strategyType }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "radarr.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "radarr.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
    {{- if .Values.podAnnotations }}
      annotations:
      {{- range $key, $value := .Values.podAnnotations }}
        {{ $key }}: {{ $value | quote }}
      {{- end }}
    {{- end }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 7878
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
          readinessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
          env:
            - name: TZ
              value: "{{ .Values.timezone }}"
            - name: PUID
              value: "{{ .Values.puid }}"
            - name: PGID
              value: "{{ .Values.pgid }}"
          volumeMounts:
            - mountPath: /config
              name: config
            {{- if .Values.persistence.config.subPath }}
              subPath: {{ .Values.persistence.config.subPath }}
            {{- end }}
            - mountPath: /media
              name: media
            {{- if .Values.persistence.media.subPath }}
              subPath: {{ .Values.persistence.media.subPath }}
            {{- end }}
          {{- range .Values.persistence.extraExistingClaimMounts }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
              readOnly: {{ .readOnly }}
          {{- end }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- if .Values.exportarr.enabled }}
        - name: radarr-exporter
          image: "{{ .Values.exportarr.image.repository }}:{{ .Values.exportarr.image.tag }}"
          imagePullPolicy: {{ .Values.exportarr.image.pullPolicy }}
          command: ["exportarr"]
          args: ["radarr"]
          env:
            - name: PORT
              value: "{{ .Values.exportarr.port }}"
            - name: URL
              value: "{{ .Values.exportarr.url }}"
            - name: APIKEY
              value: "{{ .Values.exportarr.apikey }}"
          ports:
            - name: monitoring
              containerPort: {{ .Values.exportarr.port }}
          livenessProbe:
            httpGet:
              path: /healthz
              port: monitoring
            failureThreshold: 5
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /healthz
              port: monitoring
            failureThreshold: 5
            periodSeconds: 10
          resources:
            requests:
              cpu: 100m
              memory: 64Mi
            limits:
              cpu: 500m
              memory: 256Mi
      {{- end }}
      volumes:
        - name: config
        {{- if .Values.persistence.config.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "radarr.fullname" . }}-config{{- end }}
        {{- else }}
          emptyDir: {}
        {{- end }}
        - name: media
        {{- if .Values.persistence.media.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.media.existingClaim }}{{ .Values.persistence.media.existingClaim }}{{- else }}{{ template "radarr.fullname" . }}-media{{- end }}
        {{- else }}
          emptyDir: {}
        {{- end }}
      {{- range .Values.persistence.extraExistingClaimMounts }}
        - name: {{ .name }}
          persistentVolumeClaim:
            claimName: {{ .existingClaim }}
      {{- end }}
    {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
    {{- end }}
    {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
    {{- end }}
    {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
    {{- end }}

@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "radarr.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    app.kubernetes.io/name: {{ include "radarr.name" . }}
    helm.sh/chart: {{ include "radarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
  {{- with .Values.ingress.labels -}}
  {{ toYaml . | nindent 4 }}
  {{- end -}}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
    - host: {{ . | quote }}
      http:
        paths:
          - path: {{ $ingressPath }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: http
  {{- end }}
{{- end }}

@@ -1,29 +0,0 @@
{{- if and .Values.persistence.media.enabled (not .Values.persistence.media.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "radarr.fullname" . }}-media
{{- if .Values.persistence.media.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
{{- end }}
  labels:
    app.kubernetes.io/name: {{ include "radarr.name" . }}
    helm.sh/chart: {{ include "radarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.media.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.media.size | quote }}
{{- if .Values.persistence.media.storageClass }}
{{- if (eq "-" .Values.persistence.media.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.media.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}

@@ -1,20 +0,0 @@
{{- if .Values.exportarr.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "radarr.fullname" . }}-exporter
  labels:
    app.kubernetes.io/name: {{ include "radarr.name" . }}
    helm.sh/chart: {{ include "radarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  clusterIP: None
  ports:
    - name: monitoring
      port: {{ .Values.exportarr.port }}
      targetPort: monitoring
  selector:
    app.kubernetes.io/name: {{ include "radarr.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

@@ -1,52 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "radarr.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "radarr.name" . }}
    helm.sh/chart: {{ include "radarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
  type: ClusterIP
  {{- if .Values.service.clusterIP }}
  clusterIP: {{ .Values.service.clusterIP }}
  {{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
  type: {{ .Values.service.type }}
  {{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
  {{- end -}}
{{- else }}
  type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
  externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      protocol: TCP
      targetPort: http
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{.Values.service.nodePort}}
{{ end }}
  selector:
    app.kubernetes.io/name: {{ include "radarr.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}

@@ -1,24 +0,0 @@
{{- if .Values.exportarr.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "radarr.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "radarr.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    helm.sh/chart: {{ include "radarr.chart" . }}
  {{- with .Values.exportarr.serviceMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "radarr.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  endpoints:
    - port: monitoring
      interval: {{ .Values.exportarr.serviceMonitor.interval }}
      scrapeTimeout: {{ .Values.exportarr.serviceMonitor.scrapeTimeout }}
      path: {{ .Values.exportarr.serviceMonitor.path }}
{{- end }}

@@ -1,151 +1,10 @@
# Default values for radarr.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: linuxserver/radarr
  tag: 3.0.0.3543-ls21
  pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate

# Probes configuration
probes:
  liveness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10
  readiness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10

# Prometheus Metrics
exportarr:
  enabled: false
radarr:
  image:
    repository: onedr0p/exportarr
    tag: v0.3.0
    organization: linuxserver
    repository: radarr
    pullPolicy: IfNotPresent
  url: "http://radarr.default.svc.cluster.local:7878"
  apikey:
  port: 9708
  serviceMonitor:
    enabled: false
    namespace: default
    path: /metrics
    interval: 4m
    scrapeTimeout: 90s
    additionalLabels: {}

nameOverride: ""
fullnameOverride: ""

timezone: UTC
puid: 1001
pgid: 1001

service:
  type: ClusterIP
  port: 7878
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  labels: {}
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

persistence:
  config:
    enabled: true
    ## radarr configuration data Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 1Gi
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  media:
    enabled: true
    ## radarr media volume configuration
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 10Gi
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  extraExistingClaimMounts: []
    # - name: external-mount
    #   mountPath: /srv/external-mount
    ## A manually managed Persistent Volume and Claim
    ## If defined, PVC must be created manually before volume will be bound
    # existingClaim:
    # readOnly: true

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

podAnnotations: {}

deploymentAnnotations: {}
    tag: 3.0.0.3624-ls21
  service:
    port: 7878

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS

@@ -1,17 +1,21 @@
apiVersion: v2
appVersion: 3.0.3.911
description: Sonarr is a television show downloading client
name: sonarr
version: 5.0.1
description: Smart PVR for newsgroup and bittorrent users
type: application
version: 6.0.0
appVersion: 3.0.3.913
keywords:
- sonarr
- usenet
- bittorrent
home: https://github.com/k8s-at-home/charts/tree/master/charts/sonarr
icon: https://avatars1.githubusercontent.com/u/1082903?s=400&v=4
home: https://github.com/k8s-at-home/charts/tree/master/charts/media-common/sonarr
icon: https://github.com/Sonarr/Sonarr/blob/phantom-develop/Logo/512.png?raw=true
sources:
- https://hub.docker.com/r/linuxserver/sonarr/
- https://sonarr.tv/
- https://github.com/Sonarr/Sonarr
- https://hub.docker.com/r/linuxserver/sonarr
maintainers:
- name: billimek
  email: jeff@billimek.com
- name: DirtyCajunRice
  email: nick@cajun.pro
dependencies:
- name: media-common
  repository: https://k8s-at-home.com/charts/
  version: ~1.0.0
  alias: sonarr

@@ -1,4 +1,4 @@
approvers:
- billimek
- DirtyCajunRice
reviewers:
- billimek
- DirtyCajunRice

@@ -1,132 +1,80 @@
# sonarr television show download client
# Sonarr | Smart PVR for newsgroup and bittorrent users
Umbrella chart that
* Uses [media-common](https://github.com/k8s-at-home/charts/tree/master/charts/media-common) as a base
* Adds docker image information leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/sonarr/)
* Deploys [Sonarr](https://github.com/sonarr/Sonarr)

This is a helm chart for [sonarr](https://github.com/sonarr/sonarr/) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/sonarr/)

## TL;DR;

```shell
## TL;DR
```console
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm install k8s-at-home/sonarr
```

## Installing the Chart

To install the chart with the release name `my-release`:

To install the chart with the release name `sonarr`:
```console
helm install --name my-release k8s-at-home/sonarr
helm install sonarr k8s-at-home/sonarr
```

## Upgrading

Chart versions before 6.0.0 did not use media-common. Upgrading will require you to nest your values.yaml file under a top-level `sonarr:` key.

Chart versions 3.2.0 and earlier used separate PVCs for Downloads and TV. This presented an issue where Sonarr would be unable to hard-link files between the /downloads and /tv directories when importing media. This is caused because each PVC is exposed to the pod as a separate filesystem. This resulted in Sonarr copying files rather than linking, which used additional storage without the user's knowledge.
Chart versions 3.2.0 and earlier used separate PVCs for Downloads and TV. This presented an issue where Sonarr would be unable to hard-link files between the /downloads and /tv directories when importing media. This is caused because each PVC is exposed to the pod as a separate filesystem. It resulted in Sonarr copying files rather than linking, which used additional storage without the user's knowledge.

This chart now uses a single PVC for Downloads and TV. This means all of your media (and downloads) must be in, or be subdirectories of, a single directory. If upgrading from v1 of the chart, do the following:
This chart now uses a single PVC for Downloads and TV. This means all of your media (and downloads) must be in, or be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following:

1. [Uninstall](#uninstalling-the-chart) your current release
2. On your backing store, organize your media, e.g. media/tv, media/downloads
3. If using a pre-existing PVC, create a single new PVC for all of your media
4. Refer to the [configuration](#configuration) for updates to the chart values
5. Re-install the chart
6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Sonarr's `Series Editor` under the `Series` tab. Simply select all series in your library, and use the editor to change the `Root Folder` and hit save.

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

To uninstall the `sonarr` deployment:
```console
helm delete my-release --purge
helm uninstall sonarr
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the Sonarr chart and their default values.

| Parameter | Description | Default |
| --------- | ----------- | ------- |
| `image.repository` | Image repository | `linuxserver/sonarr` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/sonarr/tags/). | `2.0.0.5344-ls60` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `timezone` | Timezone the instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | process userID the instance should run as | `1001` |
| `pgid` | process groupID the instance should run as | `1001` |
| `exportarr.enabled` | Enable Prometheus monitoring with [Exportarr](https://github.com/onedr0p/exportarr) (see the sketch after this table) | `false` |
| `exportarr.image.repository` | Exportarr image repository | `onedr0p/exportarr` |
| `exportarr.image.tag` | Exportarr image tag | `v0.3.0` |
| `exportarr.image.pullPolicy` | Exportarr image pullPolicy | `IfNotPresent` |
| `exportarr.port` | Prometheus scrape port | `9707` |
| `exportarr.url` | Sonarr's URL | `http://sonarr.default.svc.cluster.local:8989` |
| `exportarr.apikey` | Sonarr's API Key | |
| `exportarr.enableEpisodeQualityMetrics` | Enable episode quality metrics gathering | `false` |
| `exportarr.serviceMonitor.enabled` | Enable Prometheus Operator ServiceMonitor monitoring | `false` |
| `exportarr.serviceMonitor.namespace` | Define namespace where to deploy the ServiceMonitor resource | (namespace where you are deploying) |
| `exportarr.serviceMonitor.path` | Prometheus scrape path | `/metrics` |
| `exportarr.serviceMonitor.interval` | Prometheus scrape interval | `4m` |
| `exportarr.serviceMonitor.scrapeTimeout` | Prometheus scrape timeout | `90s` |
| `exportarr.serviceMonitor.additionalLabels` | Add custom labels to ServiceMonitor | `{}` |
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
| `service.type` | Kubernetes service type for the GUI | `ClusterIP` |
| `service.port` | Kubernetes port where the GUI is exposed | `8989` |
| `service.annotations` | Service annotations for the GUI | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.loadBalancerIP` | Loadbalancer IP for the GUI | `{}` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.subPath` | Mount a sub directory if set | `nil` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `persistence.media.enabled` | Use persistent volume for media | `true` |
| `persistence.media.size` | Size of persistent volume claim | `10Gi` |
| `persistence.media.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.media.storageClass` | Type of persistent volume claim | `-` |
| `persistence.media.subPath` | Mount a sub directory if set | `nil` |
| `persistence.media.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.media.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `persistence.extraExistingClaimMounts` | Optionally add multiple existing claims | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |

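For instance, the monitoring parameters above combine like this (a minimal sketch of the pre-6.0.0 flat layout shown in the table; the API key value is a placeholder you copy from Sonarr's settings):

```yaml
exportarr:
  enabled: true
  apikey: "<sonarr-api-key>"  # placeholder
  # optional: episode quality metrics make extra API calls and slow down scrapes
  enableEpisodeQualityMetrics: true
  serviceMonitor:
    enabled: true
```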
Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml) file. It has several commented out suggested values.

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install --name my-release \
  --set timezone="America/New_York" \
helm install sonarr \
  --set sonarr.env.TZ="America/New_York" \
  k8s-at-home/sonarr
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install --name my-release -f values.yaml stable/sonarr
helm install sonarr k8s-at-home/sonarr --values values.yaml
```

These values will be nested as it is a dependency, for example

```yaml
sonarr:
  image:
    tag: ...
```

---

**NOTE**

If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...` it may be because you uninstalled the chart with `skipuninstall` enabled; you need to manually delete the PVC or use `existingClaim`.
If you get

```console
Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...
```

it may be because you uninstalled the chart with `skipuninstall` enabled; you need to manually delete the PVC or use `existingClaim`.

---

Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/sonarr/values.yaml) file. It has several commented out suggested values.

@@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "sonarr.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
  You can watch the status by running 'kubectl get svc -w {{ include "sonarr.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "sonarr.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "sonarr.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:8989
{{- end }}

@@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "sonarr.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "sonarr.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "sonarr.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

@@ -1,29 +0,0 @@
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "sonarr.fullname" . }}-config
{{- if .Values.persistence.config.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
{{- end }}
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.config.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.config.size | quote }}
{{- if .Values.persistence.config.storageClass }}
{{- if (eq "-" .Values.persistence.config.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.config.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}

@@ -1,151 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "sonarr.fullname" . }}
{{- if .Values.deploymentAnnotations }}
  annotations:
  {{- range $key, $value := .Values.deploymentAnnotations }}
    {{ $key }}: {{ $value | quote }}
  {{- end }}
{{- end }}
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: 1
  revisionHistoryLimit: 3
  strategy:
    type: {{ .Values.strategyType }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "sonarr.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "sonarr.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
    {{- if .Values.podAnnotations }}
      annotations:
      {{- range $key, $value := .Values.podAnnotations }}
        {{ $key }}: {{ $value | quote }}
      {{- end }}
    {{- end }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8989
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
          readinessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
          env:
            - name: TZ
              value: "{{ .Values.timezone }}"
            - name: PUID
              value: "{{ .Values.puid }}"
            - name: PGID
              value: "{{ .Values.pgid }}"
          volumeMounts:
            - mountPath: /config
              name: config
            {{- if .Values.persistence.config.subPath }}
              subPath: "{{ .Values.persistence.config.subPath }}"
            {{- end }}
            - mountPath: /media
              name: media
            {{- if .Values.persistence.media.subPath }}
              subPath: {{ .Values.persistence.media.subPath }}
            {{- end }}
          {{- range .Values.persistence.extraExistingClaimMounts }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
              readOnly: {{ .readOnly }}
          {{- end }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- if .Values.exportarr.enabled }}
        - name: sonarr-exporter
          image: "{{ .Values.exportarr.image.repository }}:{{ .Values.exportarr.image.tag }}"
          imagePullPolicy: {{ .Values.exportarr.image.pullPolicy }}
          command: ["exportarr"]
          args: ["sonarr"]
          env:
            - name: PORT
              value: "{{ .Values.exportarr.port }}"
            - name: URL
              value: "{{ .Values.exportarr.url }}"
            - name: APIKEY
              value: "{{ .Values.exportarr.apikey }}"
            - name: ENABLE_EPISODE_QUALITY_METRICS
              value: "{{ .Values.exportarr.enableEpisodeQualityMetrics }}"
          ports:
            - name: monitoring
              containerPort: {{ .Values.exportarr.port }}
          livenessProbe:
            httpGet:
              path: /healthz
              port: monitoring
            failureThreshold: 5
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /healthz
              port: monitoring
            failureThreshold: 5
            periodSeconds: 10
          resources:
            requests:
              cpu: 100m
              memory: 64Mi
            limits:
              cpu: 500m
              memory: 256Mi
      {{- end }}
      volumes:
        - name: config
        {{- if .Values.persistence.config.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "sonarr.fullname" . }}-config{{- end }}
        {{- else }}
          emptyDir: {}
        {{- end }}
        - name: media
        {{- if .Values.persistence.media.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.media.existingClaim }}{{ .Values.persistence.media.existingClaim }}{{- else }}{{ template "sonarr.fullname" . }}-media{{- end }}
        {{- else }}
          emptyDir: {}
        {{- end }}
      {{- range .Values.persistence.extraExistingClaimMounts }}
        - name: {{ .name }}
          persistentVolumeClaim:
            claimName: {{ .existingClaim }}
      {{- end }}
    {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
    {{- end }}
    {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
    {{- end }}
    {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
    {{- end }}

@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "sonarr.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
  {{- with .Values.ingress.labels -}}
  {{ toYaml . | nindent 4 }}
  {{- end -}}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
    - host: {{ . | quote }}
      http:
        paths:
          - path: {{ $ingressPath }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: http
  {{- end }}
{{- end }}

@@ -1,29 +0,0 @@
{{- if and .Values.persistence.media.enabled (not .Values.persistence.media.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "sonarr.fullname" . }}-media
{{- if .Values.persistence.media.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
{{- end }}
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.media.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.media.size | quote }}
{{- if .Values.persistence.media.storageClass }}
{{- if (eq "-" .Values.persistence.media.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.media.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}

@@ -1,20 +0,0 @@
{{- if .Values.exportarr.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "sonarr.fullname" . }}-exporter
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  clusterIP: None
  ports:
    - name: monitoring
      port: {{ .Values.exportarr.port }}
      targetPort: monitoring
  selector:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

@@ -1,52 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "sonarr.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
  type: ClusterIP
  {{- if .Values.service.clusterIP }}
  clusterIP: {{ .Values.service.clusterIP }}
  {{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
  type: {{ .Values.service.type }}
  {{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
  {{- end -}}
{{- else }}
  type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
  externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      protocol: TCP
      targetPort: http
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{.Values.service.nodePort}}
{{ end }}
  selector:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}

@@ -1,24 +0,0 @@
{{- if .Values.exportarr.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "sonarr.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
  {{- with .Values.exportarr.serviceMonitor.additionalLabels }}
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "sonarr.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  endpoints:
    - port: monitoring
      interval: {{ .Values.exportarr.serviceMonitor.interval }}
      scrapeTimeout: {{ .Values.exportarr.serviceMonitor.scrapeTimeout }}
      path: {{ .Values.exportarr.serviceMonitor.path }}
{{- end }}

@@ -1,153 +1,10 @@
# Default values for sonarr.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: linuxserver/sonarr
  tag: 3.0.3.911-ls39
  pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate

# Probes configuration
probes:
  liveness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10
  readiness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10

# Prometheus Metrics
exportarr:
  enabled: false
sonarr:
  image:
    repository: onedr0p/exportarr
    tag: v0.3.0
    organization: linuxserver
    repository: sonarr
    pullPolicy: IfNotPresent
  url: "http://sonarr.default.svc.cluster.local:8989"
  apikey:
  port: 9707
  # Enable to gather episode quality metrics; if enabled, it slows down scrape timing due to more API calls
  enableEpisodeQualityMetrics: false
  serviceMonitor:
    enabled: false
    namespace: default
    path: /metrics
    interval: 4m
    scrapeTimeout: 90s
    additionalLabels: {}

nameOverride: ""
fullnameOverride: ""

timezone: UTC
puid: 1001
pgid: 1001

service:
  type: ClusterIP
  port: 8989
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  labels: {}
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

persistence:
  config:
    enabled: true
    ## sonarr configuration data Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 1Gi
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  media:
    enabled: true
    ## sonarr media volume configuration
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 10Gi
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  extraExistingClaimMounts: []
    # - name: external-mount
    #   mountPath: /srv/external-mount
    ## A manually managed Persistent Volume and Claim
    ## If defined, PVC must be created manually before volume will be bound
    # existingClaim:
    # readOnly: true

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

podAnnotations: {}

deploymentAnnotations: {}
    tag: 3.0.3.913-ls40
  service:
    port: 8989

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
@@ -1,16 +1,21 @@
apiVersion: v2
appVersion: v2.5.4
description: A Python based monitoring and tracking tool for Plex Media Server.
name: tautulli
version: 3.0.1
description: A Python based monitoring and tracking tool for Plex Media Server
type: application
version: 4.0.0
appVersion: v2.5.4
keywords:
  - tautulli
  - plex
home: https://github.com/k8s-at-home/charts/tree/master/charts/tautulli
icon: https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/tautulli-icon.png
icon: https://github.com/Tautulli/Tautulli/blob/master/data/interfaces/default/images/logo.png?raw=true
sources:
  - https://hub.docker.com/r/linuxserver/tautulli/
  - https://tautulli.com/
  - https://github.com/Tautulli/Tautulli
  - https://hub.docker.com/r/tautulli/tautulli
maintainers:
  - name: billimek
    email: jeff@billimek.com
  - name: DirtyCajunRice
    email: nick@cajun.pro
dependencies:
  - name: media-common
    repository: https://k8s-at-home.com/charts/
    version: ~1.0.0
    alias: tautulli
@@ -1,4 +1,4 @@
approvers:
  - billimek
  - DirtyCajunRice
reviewers:
  - billimek
  - DirtyCajunRice
@@ -1,96 +1,62 @@
# Tautulli
# Tautulli | A Python based monitoring and tracking tool for Plex Media Server

Umbrella chart that
* Uses [media-common](https://github.com/k8s-at-home/charts/tree/master/charts/media-common) as a base
* Adds docker image information leveraging the [official image](https://hub.docker.com/r/tautulli/tautulli/)
* Deploys [Tautulli](https://github.com/Tautulli/Tautulli)

This is a helm chart for [Tautulli](https://tautulli.com/) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/tautulli/)

## TL;DR;

```shell
## TL;DR
```console
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm install k8s-at-home/tautulli
```

## Installing the Chart

To install the chart with the release name `my-release`:

To install the chart with the release name `tautulli`:
```console
helm install --name my-release k8s-at-home/tautulli
helm install tautulli k8s-at-home/tautulli
```

## Upgrading
Chart versions before 4.0.0 did not use media-common. Upgrading will require you to nest your values.yaml file under
a top-level `tautulli:` key.
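A minimal sketch of that nesting (the keys shown are illustrative, taken from the examples further down in this README):

```yaml
# Chart < 4.0.0: values at the top level
image:
  tag: v2.5.4-ls12
timezone: UTC

# Chart >= 4.0.0: the same intent, nested under the `tautulli:` key
tautulli:
  image:
    tag: v2.5.4
  env:
    TZ: UTC
```
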
## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

To uninstall the `tautulli` deployment:
```console
helm delete my-release --purge
helm uninstall tautulli
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the Tautulli chart and their default values.

| Parameter | Description | Default |
|-----------|-------------|---------|
| `image.repository` | Image repository | `linuxserver/tautulli` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/tautulli/tags/). | `v2.1.39-ls32` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `timezone` | Timezone the Tautulli instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | Process userID the Tautulli instance should run as | `1001` |
| `pgid` | Process groupID the Tautulli instance should run as | `1001` |
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
| `service.type` | Kubernetes service type for the Tautulli GUI | `ClusterIP` |
| `service.port` | Kubernetes port where the Tautulli GUI is exposed | `8181` |
| `service.annotations` | Service annotations for the Tautulli GUI | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.loadBalancerIP` | Load balancer IP for the Tautulli GUI | `{}` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.config.subPath` | Mount a sub directory of the persistent volume if set | `""` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |

Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml)
file. It has several commented out suggested values.

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install --name my-release \
  --set timezone="America/New_York" \
helm install tautulli \
  --set tautulli.env.TZ="America/New_York" \
  k8s-at-home/tautulli
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart.
For example,
```console
helm install --name my-release -f values.yaml k8s-at-home/tautulli
helm install tautulli k8s-at-home/tautulli --values values.yaml
```

These values will be nested, as the chart is a dependency, for example

```yaml
tautulli:
  image:
    tag: ...
```

---
**NOTE**

If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...` it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use `existingClaim`.
If you get
```console
Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...
```
it may be because you uninstalled the chart with `skipuninstall` enabled; you need to manually delete the PVC or use `existingClaim`.

---

Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/tautulli/values.yaml) file. It has several commented out suggested values.
@@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "tautulli.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        You can watch the status of it by running 'kubectl get svc -w {{ include "tautulli.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "tautulli.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "tautulli.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8181 to use your application"
  kubectl port-forward $POD_NAME 8181:8181
{{- end }}
@@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "tautulli.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "tautulli.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "tautulli.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -1,29 +0,0 @@
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "tautulli.fullname" . }}-config
{{- if .Values.persistence.config.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
{{- end }}
  labels:
    app.kubernetes.io/name: {{ include "tautulli.name" . }}
    helm.sh/chart: {{ include "tautulli.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.config.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.config.size | quote }}
{{- if .Values.persistence.config.storageClass }}
{{- if (eq "-" .Values.persistence.config.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.config.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}
@@ -1,101 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "tautulli.fullname" . }}
  {{- if .Values.deploymentAnnotations }}
  annotations:
    {{- range $key, $value := .Values.deploymentAnnotations }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "tautulli.name" . }}
    helm.sh/chart: {{ include "tautulli.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: 1
  revisionHistoryLimit: 3
  strategy:
    type: {{ .Values.strategyType }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "tautulli.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "tautulli.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
      {{- if .Values.podAnnotations }}
      annotations:
        {{- range $key, $value := .Values.podAnnotations }}
        {{ $key }}: {{ $value | quote }}
        {{- end }}
      {{- end }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8181
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
          readinessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
          env:
            - name: TZ
              value: "{{ .Values.timezone }}"
            - name: PUID
              value: "{{ .Values.puid }}"
            - name: PGID
              value: "{{ .Values.pgid }}"
          volumeMounts:
            - mountPath: /config
              name: config
              {{- if .Values.persistence.config.subPath }}
              subPath: "{{ .Values.persistence.config.subPath }}"
              {{- end }}
            {{- range .Values.persistence.extraExistingClaimMounts }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
              readOnly: {{ .readOnly }}
            {{- end }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
      volumes:
        - name: config
          {{- if .Values.persistence.config.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "tautulli.fullname" . }}-config{{- end }}
          {{- else }}
          emptyDir: {}
          {{ end }}
        {{- range .Values.persistence.extraExistingClaimMounts }}
        - name: {{ .name }}
          persistentVolumeClaim:
            claimName: {{ .existingClaim }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
@@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "tautulli.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    app.kubernetes.io/name: {{ include "tautulli.name" . }}
    helm.sh/chart: {{ include "tautulli.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    {{- with .Values.ingress.labels -}}
    {{ toYaml . | nindent 4 }}
    {{- end -}}
  {{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ . | quote }}
      http:
        paths:
          - path: {{ $ingressPath }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: http
    {{- end }}
{{- end }}
@@ -1,53 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "tautulli.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "tautulli.name" . }}
    helm.sh/chart: {{ include "tautulli.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
  type: ClusterIP
  {{- if .Values.service.clusterIP }}
  clusterIP: {{ .Values.service.clusterIP }}
  {{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
  type: {{ .Values.service.type }}
  {{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
  {{- end -}}
{{- else }}
  type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
  externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      protocol: TCP
      targetPort: http
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{ .Values.service.nodePort }}
{{ end }}
  selector:
    app.kubernetes.io/name: {{ include "tautulli.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
@@ -1,117 +1,10 @@
# Default values for tautulli.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: linuxserver/tautulli
  tag: v2.5.4-ls12
  pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate

# Probes configuration
probes:
  liveness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10
  readiness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10

nameOverride: ""
fullnameOverride: ""

timezone: UTC
puid: 1001
pgid: 1001

service:
  type: ClusterIP
  port: 8181
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  labels: {}
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

persistence:
  config:
    enabled: true
    ## tautulli configuration data Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi
    ## If subPath is set mount a sub folder of a volume instead of the root of the volume.
    ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
    ##
    subPath: ""
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  extraExistingClaimMounts: []
    # - name: external-mount
    #   mountPath: /srv/external-mount
    ## A manually managed Persistent Volume and Claim
    ## If defined, PVC must be created manually before volume will be bound
    #   existingClaim:
    #   readOnly: true

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

podAnnotations: {}

deploymentAnnotations: {}
tautulli:
  image:
    organization: tautulli
    repository: tautulli
    pullPolicy: IfNotPresent
    tag: v2.5.4
  service:
    port: 8181
@@ -20,4 +20,4 @@
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
OWNERS
18 charts/unifi/Chart.yaml Normal file
@@ -0,0 +1,18 @@
apiVersion: v2
appVersion: 5.12.35
description: Ubiquiti Networks' Unifi Controller
name: unifi
version: 1.0.0
keywords:
  - ubiquiti
  - unifi
  - mongodb
home: https://github.com/k8s-at-home/charts/tree/master/charts/unifi
icon: https://blog.ubnt.com/wp-content/uploads/2016/10/unifi-app-logo.png
sources:
  - https://github.com/jacobalberty/unifi-docker
maintainers:
  - name: billimek
    email: jeff@billimek.com
  - name: mcronce
    email: mike@quadra-tec.net
6 charts/unifi/OWNERS Normal file
@@ -0,0 +1,6 @@
approvers:
  - billimek
  - mcronce
reviewers:
  - billimek
  - mcronce
187 charts/unifi/README.md Normal file
@@ -0,0 +1,187 @@
# Ubiquiti Networks' Unifi Controller

This is a helm chart for [Ubiquiti Networks'][ubnt] [Unifi Controller][ubnt 2].

## TL;DR;

```shell
helm repo add k8s-at-home https://k8s-at-home.com/charts/
helm install k8s-at-home/unifi
```

## Installing the Chart

To install the chart with the release name `my-release`:

```console
helm install --name my-release k8s-at-home/unifi
```

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

```console
helm delete my-release --purge
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the Unifi chart and their default values.

| Parameter | Default | Description |
|-----------|---------|-------------|
| `image.repository` | `jacobalberty/unifi` | Image repository |
| `image.tag` | `5.12.35` | Image tag. Possible values listed [here][docker]. |
| `image.pullPolicy` | `IfNotPresent` | Image pull policy |
| `strategyType` | `Recreate` | Specifies the strategy used to replace old Pods by new ones |
| `guiService.type` | `ClusterIP` | Kubernetes service type for the Unifi GUI |
| `guiService.port` | `8443` | Kubernetes port where the Unifi GUI is exposed |
| `guiService.annotations` | `{}` | Service annotations for the Unifi GUI |
| `guiService.labels` | `{}` | Custom labels |
| `guiService.loadBalancerIP` | `{}` | Load balancer IP for the Unifi GUI |
| `guiService.loadBalancerSourceRanges` | None | List of IP CIDRs allowed access to load balancer (if supported) |
| `guiService.externalTrafficPolicy` | `Cluster` | Set the externalTrafficPolicy in the Service to either Cluster or Local |
| `captivePortalService.enabled` | `false` | Install the captive portal service (needed if you want guest captive portal) |
| `captivePortalService.type` | `ClusterIP` | Kubernetes service type for the captive portal |
| `captivePortalService.http` | `8880` | Kubernetes port where the captive portal is exposed |
| `captivePortalService.https` | `8843` | Kubernetes port where the captive portal is exposed (with SSL) |
| `captivePortalService.annotations` | `{}` | Service annotations for the captive portal |
| `captivePortalService.labels` | `{}` | Custom labels |
| `captivePortalService.loadBalancerIP` | `{}` | Load balancer IP for the captive portal |
| `captivePortalService.loadBalancerSourceRanges` | None | List of IP CIDRs allowed access to load balancer (if supported) |
| `captivePortalService.externalTrafficPolicy` | `Cluster` | Set the externalTrafficPolicy in the Service to either Cluster or Local |
| `captivePortalService.ingress.enabled` | `false` | Enables Ingress (for the captive portal; the main ingress needs to be enabled for the controller to be accessible) |
| `captivePortalService.ingress.annotations` | `{}` | Ingress annotations for the captive portal |
| `captivePortalService.ingress.labels` | `{}` | Custom labels for the captive portal |
| `captivePortalService.ingress.path` | `/` | Ingress path for the captive portal |
| `captivePortalService.ingress.hosts` | `chart-example.local` | Ingress accepted hostnames for the captive portal |
| `captivePortalService.ingress.tls` | `[]` | Ingress TLS configuration for the captive portal |
| `controllerService.type` | `NodePort` | Kubernetes service type for the Unifi Controller communication |
| `controllerService.port` | `8080` | Kubernetes port where the Unifi Controller is exposed - this needs to be reachable by the unifi devices on the network |
| `controllerService.annotations` | `{}` | Service annotations for the Unifi Controller |
| `controllerService.labels` | `{}` | Custom labels |
| `controllerService.loadBalancerIP` | `{}` | Load balancer IP for the Unifi Controller |
| `controllerService.loadBalancerSourceRanges` | None | List of IP CIDRs allowed access to load balancer (if supported) |
| `controllerService.externalTrafficPolicy` | `Cluster` | Set the externalTrafficPolicy in the Service to either Cluster or Local |
| `controllerService.ingress.enabled` | `false` | Enables Ingress for the controller |
| `controllerService.ingress.annotations` | `{}` | Ingress annotations for the controller |
| `controllerService.ingress.labels` | `{}` | Custom labels for the controller |
| `controllerService.ingress.path` | `/` | Ingress path for the controller |
| `controllerService.ingress.hosts` | `chart-example.local` | Ingress accepted hostnames for the controller |
| `controllerService.ingress.tls` | `[]` | Ingress TLS configuration for the controller |
| `stunService.type` | `NodePort` | Kubernetes service type for the Unifi STUN |
| `stunService.port` | `3478` | Kubernetes UDP port where the Unifi STUN is exposed |
| `stunService.annotations` | `{}` | Service annotations for the Unifi STUN |
| `stunService.labels` | `{}` | Custom labels |
| `stunService.loadBalancerIP` | `{}` | Load balancer IP for the Unifi STUN |
| `stunService.loadBalancerSourceRanges` | None | List of IP CIDRs allowed access to load balancer (if supported) |
| `stunService.externalTrafficPolicy` | `Cluster` | Set the externalTrafficPolicy in the Service to either Cluster or Local |
| `discoveryService.type` | `NodePort` | Kubernetes service type for AP discovery |
| `discoveryService.port` | `10001` | Kubernetes UDP port for AP discovery |
| `discoveryService.annotations` | `{}` | Service annotations for AP discovery |
| `discoveryService.labels` | `{}` | Custom labels |
| `discoveryService.loadBalancerIP` | `{}` | Load balancer IP for AP discovery |
| `discoveryService.loadBalancerSourceRanges` | None | List of IP CIDRs allowed access to load balancer (if supported) |
| `discoveryService.externalTrafficPolicy` | `Cluster` | Set the externalTrafficPolicy in the Service to either Cluster or Local |
| `unifiedService.enabled` | `false` | Use a single service for GUI, controller, STUN, and discovery |
| `unifiedService.type` | `ClusterIP` | Kubernetes service type for the unified service |
| `unifiedService.annotations` | `{}` | Annotations for the unified service |
| `unifiedService.labels` | `{}` | Custom labels for the unified service |
| `unifiedService.loadBalancerIP` | None | Load balancer IP for the unified service |
| `unifiedService.loadBalancerSourceRanges` | None | List of IP CIDRs allowed access to the load balancer (if supported) |
| `unifiedService.externalTrafficPolicy` | `Cluster` | Set the externalTrafficPolicy in the service to either Cluster or Local |
| `ingress.enabled` | `false` | Enables Ingress |
| `ingress.annotations` | `{}` | Ingress annotations |
| `ingress.labels` | `{}` | Custom labels |
| `ingress.path` | `/` | Ingress path |
| `ingress.hosts` | `chart-example.local` | Ingress accepted hostnames |
| `ingress.tls` | `[]` | Ingress TLS configuration |
| `timezone` | `UTC` | Timezone the Unifi controller should run as, e.g. 'America/New_York' |
| `runAsRoot` | `false` | Run the controller as UID0 (root user); if set to false, will give container SETFCAP instead |
| `UID` | `999` | Run the controller as user UID |
| `GID` | `999` | Run the controller as group GID |
| `customCert.enabled` | `false` | Define whether you are using a custom certificate |
| `customCert.isChain` | `false` | If you are using a Let's Encrypt certificate which already includes the full chain, set this to `true` |
| `customCert.certName` | `tls.crt` | Name of the certificate file in `<unifi-data>/cert` |
| `customCert.keyName` | `tls.key` | Name of the private key file in `<unifi-data>/cert` |
| `customCert.certSecret` | `nil` | Name of the k8s tls secret where the certificate and its key are stored |
| `mongodb.enabled` | `false` | Use external MongoDB for data storage |
| `mongodb.dbUri` | `mongodb://mongo/unifi` | External MongoDB URI |
| `mongodb.statDbUri` | `mongodb://mongo/unifi_stat` | External MongoDB statdb URI |
| `mongodb.databaseName` | `unifi` | External MongoDB database name |
| `persistence.enabled` | `true` | Use persistent volume to store data |
| `persistence.size` | `5Gi` | Size of persistent volume claim |
| `persistence.existingClaim` | `nil` | Use an existing PVC to persist data |
| `persistence.subPath` | `` | Store data in a subdirectory of the PV instead of at the root directory |
| `persistence.storageClass` | `-` | Type of persistent volume claim |
| `extraVolumes` | `[]` | Additional volumes to be used by extraVolumeMounts |
| `extraVolumeMounts` | `[]` | Additional volume mounts to be mounted in the unifi container |
| `persistence.accessModes` | `[]` | Persistence access modes |
| `extraConfigFiles` | `{}` | Dictionary containing files mounted to `/configmap` inside the pod (see [values.yaml](values.yaml) for examples, and the sketch after this table) |
| `extraJvmOpts` | `[]` | List of additional JVM options, e.g. `["-Dlog4j.configurationFile=file:/configmap/log4j2.xml"]` |
| `resources` | `{}` | CPU/Memory resource requests/limits |
| `nodeSelector` | `{}` | Node labels for pod assignment |
| `tolerations` | `[]` | Toleration labels for pod assignment |
| `affinity` | `{}` | Affinity settings for pod assignment |
| `podAnnotations` | `{}` | Key-value pairs to add as pod annotations |
| `deploymentAnnotations` | `{}` | Key-value pairs to add as deployment annotations |

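As a concrete illustration of `extraConfigFiles` and `extraJvmOpts`, the following values snippet mounts a custom `log4j2.xml` and points the controller's JVM at it. This is a minimal sketch; the log4j2 configuration shown is illustrative, not a recommended logging setup.

```yaml
extraConfigFiles:
  # Files listed here end up under /configmap inside the pod (per the table above)
  log4j2.xml: |-
    <?xml version="1.0" encoding="UTF-8"?>
    <Configuration status="WARN">
      <Appenders>
        <Console name="Console" target="SYSTEM_OUT"/>
      </Appenders>
      <Loggers>
        <Root level="info">
          <AppenderRef ref="Console"/>
        </Root>
      </Loggers>
    </Configuration>
extraJvmOpts:
  # Tell the JVM to use the mounted file
  - "-Dlog4j.configurationFile=file:/configmap/log4j2.xml"
```
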
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install --name my-release \
  --set timezone="America/New_York" \
  k8s-at-home/unifi
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install --name my-release -f values.yaml k8s-at-home/unifi
```

Read through the [values.yaml](values.yaml) file. It has several commented out suggested values.

## Regarding the services

- `guiService`: Represents the main web UI and is what one would normally point
  the ingress to.
- `captivePortalService`: This service is used to allow the captive portal webpage
  to be accessible. It needs to be reachable by the clients connecting to your guest
  network.
- `controllerService`: This is needed in order for the unifi devices to talk to
  the controller and must be otherwise exposed to the network where the unifi
  devices run. If you run this as a `NodePort` (the default setting), make sure
  that there is an external load balancer that is directing traffic from port
  8080 to the `NodePort` for this service (see the values sketch after this list).
- `discoveryService`: This needs to be reachable by the unifi devices on the
  network, similar to the controller `Service`, but only during the discovery
  phase. This is a UDP service.
- `stunService`: Also used periodically by the unifi devices to communicate
  with the controller using UDP. See [this article][ubnt 3] and [this other
  article][ubnt 4] for more information.

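Tying the list above together, a values override along these lines keeps the web UI internal behind an ingress while exposing the device-facing services on node ports. This is a minimal sketch built only from keys in the configuration table; adjust the service types to match your network.

```yaml
guiService:
  type: ClusterIP      # main web UI, typically reached through the ingress
controllerService:
  type: NodePort       # devices must be able to reach port 8080
stunService:
  type: NodePort       # devices periodically use UDP 3478
discoveryService:
  type: NodePort       # devices use UDP 10001 during the discovery phase only
```
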
## Ingress and HTTPS

Unifi does [not support HTTP][unifi], so if you wish to use the guiService, you
need to ensure that you use a backend transport of HTTPS.

An example entry in `values.yaml` to achieve this is as follows:

```yaml
ingress:
  enabled: true
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
```

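Extending that entry with a host and a TLS section gives a more complete ingress definition (a sketch; `unifi.example.com` and `unifi-tls` are placeholders):

```yaml
ingress:
  enabled: true
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
  path: /
  hosts:
    - unifi.example.com          # placeholder hostname
  tls:
    - secretName: unifi-tls      # placeholder TLS secret name
      hosts:
        - unifi.example.com
```
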
[docker]: https://hub.docker.com/r/jacobalberty/unifi/tags/
[github]: https://github.com/jacobalberty/unifi-docker
[ubnt]: https://www.ubnt.com/
[ubnt 2]: https://unifi-sdn.ubnt.com/
[ubnt 3]: https://help.ubnt.com/hc/en-us/articles/204976094-UniFi-What-protocol-does-the-controller-use-to-communicate-with-the-UAP-
[ubnt 4]: https://help.ubnt.com/hc/en-us/articles/115015457668-UniFi-Troubleshooting-STUN-Communication-Errors
[unifi]: https://community.ui.com/questions/Controller-how-to-deactivate-http-to-https/c5e247d8-b5b9-4c84-a3bb-28a90fd65668
19 charts/unifi/templates/NOTES.txt Normal file
@@ -0,0 +1,19 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.guiService.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "unifi.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.guiService.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        You can watch the status of it by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "unifi.fullname" . }}-gui'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "unifi.fullname" . }}-gui -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.guiService.port }}
{{- else if contains "ClusterIP" .Values.guiService.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "unifi.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8443:{{ .Values.guiService.port }}
  Visit https://127.0.0.1:8443 to use your application
{{- end }}
Some files were not shown because too many files have changed in this diff