Compare commits

...

9 Commits

Author SHA1 Message Date
ᗪєνιη ᗷυнʟ
93774a4ed6 [lidarr] use common chart (#123) 2020-11-08 13:25:59 -05:00
ᗪєνιη ᗷυнʟ
5cfe91e0f3 [sonarr] use common chart (#122) 2020-11-08 13:00:49 -05:00
nolte
aacd8ceac0 [mosquitto] add Prometheus Exporter as Sidecar Container (#118)
Signed-off-by: nolte <nolte07@googlemail.com>

Co-authored-by: nolte <nolte07@googlemail.com>
2020-11-08 12:41:30 -05:00
ᗪєνιη ᗷυнʟ
799111dddb [radarr] use new common chart (#121)
* radarr: use new common chart

* jackett

* radarr fix newline
2020-11-08 12:39:37 -05:00
Bernd Schörgers
2aa2718559 [jackett] Bump library, add ingress test (#117) 2020-11-07 16:40:14 -05:00
Bernd Schörgers
2b158892e3 [common] Add capabilities to determine apiVersion (#116)
* [common] Add capabilities to determine apiVersion

* [common] Add capabilities to determine apiVersion
2020-11-07 08:17:31 -05:00
Bernd Schörgers
45c9f3c39e [jackett] Migrate to common library (#113) 2020-11-06 16:40:53 -05:00
Bernd Schörgers
c7f15f37a2 [common] Fix syntax error (#114) 2020-11-06 16:22:57 -05:00
Bernd Schörgers
6b9650f348 [common] Fix classes logic (#112) 2020-11-06 15:54:25 -05:00
45 changed files with 484 additions and 336 deletions

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: common
description: Function library for k8s-at-home charts
type: library
version: 1.0.1
version: 1.0.4
keywords:
- k8s-at-home
- common

View File

@@ -1,5 +1,5 @@
{{- define "common.deployment" -}}
apiVersion: apps/v1
apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }}
kind: Deployment
metadata:
name: {{ template "common.names.fullname" . }}

View File

@@ -2,11 +2,11 @@
Default NOTES.txt content.
*/}}
{{- define "common.notes.defaultNotes" -}}
{{- $svcPort := .Values.service.port -}}
{{- $svcPort := .Values.service.port.port -}}
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ .host }}{{ (first .paths).path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.names.fullname" . }})
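For reference, the updated notes template reads the port from `service.port.port` and expects each entry under `ingress.hosts` to be an object carrying a `host` and a `paths` list. A minimal values sketch matching what the template dereferences (hostname and port are illustrative):

```yaml
service:
  port:
    port: 8080                  # read as .Values.service.port.port
ingress:
  enabled: true
  hosts:
    - host: app.example.com     # read as .host
      paths:
        - path: /               # read as (first .paths).path
          pathType: Prefix
```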

View File

@@ -1,5 +1,5 @@
{{- define "common.statefulset" -}}
apiVersion: apps/v1
apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
kind: StatefulSet
metadata:
name: {{ template "common.names.fullname" . }}

View File

@@ -1,22 +1,16 @@
{{- define "common.classes.ingress" -}}
{{- $apiv1 := .Capabilities.APIVersions.Has "networking.k8s.io/v1" -}}
{{- $apiv1beta1 := .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
{{- $ingressName := include "common.names.fullname" . -}}
{{- $values := .Values.ingress -}}
{{- if and (hasKey . "ObjectValues") (hasKey .ObjectValues "ingress") -}}
{{- $values = .ObjectValues.ingress -}}
{{- if hasKey . "ObjectValues" -}}
{{- with .ObjectValues.ingress -}}
{{- $values = . -}}
{{- end -}}
{{ end -}}
{{- if hasKey $values "nameSuffix" -}}
{{- $ingressName = printf "%v-%v" $ingressName $values.nameSuffix -}}
{{ end -}}
{{- $svcPort := $values.svcPort -}}
{{- if $apiv1 -}}
apiVersion: networking.k8s.io/v1
{{- else if $apiv1beta1 -}}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{ end }}
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $ingressName }}
@@ -44,19 +38,9 @@ spec:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if or $apiv1beta1 $apiv1 }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if $apiv1 }}
service:
name: {{ $ingressName }}
port:
name: {{ $svcPort }}
{{- else }}
serviceName: {{ $ingressName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,7 +1,9 @@
{{- define "common.classes.pvc" -}}
{{- $values := .Values.persistence -}}
{{- if and (hasKey . "ObjectValues") (hasKey .ObjectValues "persistence") -}}
{{- $values = .ObjectValues.persistence -}}
{{- if hasKey . "ObjectValues" -}}
{{- with .ObjectValues.persistence -}}
{{- $values = . -}}
{{- end -}}
{{ end -}}
{{- $pvcName := include "common.names.fullname" . -}}
{{- if hasKey $values "nameSuffix" -}}

View File

@@ -1,7 +1,9 @@
{{- define "common.classes.service" -}}
{{- $values := .Values.service -}}
{{- if and (hasKey . "ObjectValues") (hasKey .ObjectValues "service") -}}
{{- $values = .ObjectValues.service -}}
{{- if hasKey . "ObjectValues" -}}
{{- with .ObjectValues.service -}}
{{- $values = . -}}
{{- end -}}
{{ end -}}
{{- $svcType := $values.type -}}
apiVersion: v1

View File

@@ -0,0 +1,32 @@
{{/*
Return the appropriate apiVersion for deployment.
*/}}
{{- define "common.capabilities.deployment.apiVersion" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for statefulset.
*/}}
{{- define "common.capabilities.statefulset.apiVersion" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "apps/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "common.capabilities.ingress.apiVersion" -}}
{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "networking.k8s.io/v1beta1" -}}
{{- end -}}
{{- end -}}
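These helpers resolve the apiVersion from the cluster version at render time; the deployment, statefulset, and ingress templates above consume them via `include`. A sketch of the same pattern in a chart that depends on the common library (the template file name is hypothetical):

```yaml
{{/* templates/ingress.yaml in a chart that lists the common library as a dependency */}}
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
kind: Ingress
metadata:
  name: {{ include "common.names.fullname" . }}
```

On Kubernetes 1.14 and newer this renders `networking.k8s.io/v1beta1`, while older clusters fall back to `extensions/v1beta1`; when rendering offline, newer Helm 3 releases can simulate the target cluster with `helm template --kube-version`.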

View File

@@ -26,8 +26,8 @@
volumeMounts:
{{- range $index, $PVC := .Values.persistence }}
{{- if $PVC.enabled }}
- mountPath: {{- $PVC.mountPath }}
name: {{- $index }}
- mountPath: {{ $PVC.mountPath }}
name: {{ $index }}
{{- end }}
{{- end }}
{{- if .Values.additionalVolumeMounts }}
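For context on this fix: the `{{-` marker trims the whitespace immediately before the action, so the old lines rendered each value flush against its key and YAML then parsed the line as a single plain scalar rather than a key/value pair. An illustrative before/after (the `/config` path is hypothetical):

```yaml
# Old:  - mountPath: {{- $PVC.mountPath }}   renders as   - mountPath:/config
#           name: {{- $index }}              renders as        name:config
# New:  - mountPath: {{ $PVC.mountPath }}    renders as   - mountPath: /config
#           name: {{ $index }}               renders as        name: config
```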

View File

@@ -30,7 +30,7 @@ service:
type: ClusterIP
# Specify the default port information
port:
portNumber: ""
port: ""
name: http
protocol: TCP
targetPort: http

View File

@@ -1,8 +1,8 @@
apiVersion: v2
appVersion: v0.16.1045
appVersion: v0.16.2106
description: API Support for your favorite torrent trackers
name: jackett
version: 4.0.1
version: 5.0.2
keywords:
- jackett
- torrent
@@ -15,7 +15,6 @@ maintainers:
- name: billimek
email: jeff@billimek.com
dependencies:
- name: media-common
- name: common
repository: https://k8s-at-home.com/charts/
version: ^1.0.0
alias: jackett
version: ^1.0.4

View File

@@ -1,6 +1,6 @@
# Jackett
This is a helm chart for [Jackett](https://github.com/Jackett/Jackett) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/jackett/)
This is a helm chart for [Jackett](https://github.com/Jackett/Jackett).
## TL;DR;
@@ -28,8 +28,9 @@ helm delete my-release --purge
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml)
Read through the chart's [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/jackett/values.yaml)
file. It has several commented out suggested values.
Additionally you can take a look at the common library [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/common/values.yaml) for more (advanced) configuration options.
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
@@ -43,11 +44,9 @@ chart. For example,
helm install jackett k8s-at-home/jackett --values values.yaml
```
These values will be nested as it is a dependency, for example
```yaml
jackett:
image:
tag: ...
image:
tag: ...
```
---
@@ -59,4 +58,19 @@ Error: rendered manifests contain a resource that already exists. Unable to cont
```
it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use `existingClaim`.
---
---
## Upgrading an existing Release to a new major version
A major chart version change (like 4.0.1 -> 5.0.0) indicates that there is an incompatible breaking change potentially needing manual actions.
### Upgrading from 4.x.x to 5.x.x
Due to migrating to a centralized common library some values in `values.yaml` have changed.
Examples:
* `service.port` has been moved to `service.port.port`.
* `persistence.type` has been moved to `controllerType`.
Refer to the library values.yaml for more configuration options.
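A hedged before/after sketch of that migration for this chart (the port comes from the chart defaults; the old `persistence.type` value and its `controllerType` replacement are assumed for illustration):

```yaml
# values.yaml with chart 4.x.x
jackett:
  service:
    port: 9117
  persistence:
    type: deployment          # old key, value assumed for illustration
---
# values.yaml with chart 5.x.x
jackett:
  service:
    port:
      port: 9117
  controllerType: deployment  # replaces persistence.type
```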

View File

@@ -1,10 +1,2 @@
jackett:
image:
organization: linuxserver
repository: jackett
tag: v0.16.1045-ls14
service:
type: ClusterIP
port: 9117
ingress:
enabled: false
ingress:
enabled: true

View File

@@ -1,20 +1 @@
{{- $svcPort := .Values.jackett.service.port -}}
1. Get the application URL by running these commands:
{{- if .Values.jackett.ingress.enabled }}
{{- range .Values.jackett.ingress.hosts }}
http{{ if $.Values.jackett.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.jackett.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.jackett.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "media-common.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.jackett.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get svc -w {{ include "media-common.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "media-common.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ $svcPort }}
{{- else if contains "ClusterIP" .Values.jackett.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "media-common.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:{{ $svcPort }}
{{- end }}
{{- include "common.notes.defaultNotes" . -}}

View File

@@ -0,0 +1 @@
{{ include "common.all" . }}

View File

@@ -1,22 +0,0 @@
{{- if and .Values.jackett.persistence.torrentblackhole.enabled (not .Values.jackett.persistence.torrentblackhole.existingClaim) }}
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "media-common.fullname" . }}-downloads
{{- if .Values.jackett.persistence.torrentblackhole.skipuninstall }}
annotations:
"helm.sh/resource-policy": keep
{{- end }}
labels:
{{- include "media-common.labels" . | nindent 4 }}
spec:
accessModes:
- {{ .Values.jackett.persistence.torrentblackhole.accessMode | quote }}
resources:
requests:
storage: {{ .Values.jackett.persistence.torrentblackhole.size | quote }}
{{- if .Values.jackett.persistence.torrentblackhole.storageClass }}
storageClassName: {{ if (eq "-" .Values.jackett.persistence.torrentblackhole.storageClass) }}""{{- else }}{{ .Values.jackett.persistence.torrentblackhole.storageClass | quote}}{{- end }}
{{- end }}
{{- end -}}

View File

@@ -1,43 +1,37 @@
# Default values for Jackett.
jackett:
image:
organization: linuxserver
repository: jackett
pullPolicy: IfNotPresent
tag: v0.16.1045-ls14
image:
repository: linuxserver/jackett
pullPolicy: IfNotPresent
tag: version-v0.16.2106
service:
service:
port:
port: 9117
env: {}
# TZ: UTC
# PUID: 1001
# PGID: 1001
env: {}
# TZ: UTC
# PUID: 1001
# PGID: 1001
persistence:
torrentblackhole:
enabled: false
## Jackett torrent torrentblackhole Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
# storageClass: "-"
# accessMode: ReadWriteOnce
# size: 1Gi
## Do not delete the pvc upon helm uninstall
# skipuninstall: false
# existingClaim: ""
persistence:
config:
enabled: true
emptyDir: true
additionalVolumes:
- name: torrentblackhole
emptyDir: {}
## When using persistence.torrentblackhole.enabled: true, adjust this to:
# persistentVolumeClaim:
# claimName: jackett-torrentblackhole
additionalVolumeMounts:
- name: torrentblackhole
mountPath: /downloads
torrentblackhole:
enabled: true
emptyDir: true
mountPath: /downloads
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
# storageClass: "-"
# accessMode: ReadWriteOnce
# size: 1Gi
## Do not delete the pvc upon helm uninstall
# skipuninstall: false
# existingClaim: ""
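Because the new defaults keep `torrentblackhole` on an `emptyDir`, anyone who wants the blackhole directory to survive pod restarts would switch it to a PVC. A sketch assuming the common library honours the commented keys above (the storage class name is illustrative):

```yaml
jackett:
  persistence:
    torrentblackhole:
      enabled: true
      emptyDir: false               # back the mount with a PVC instead
      mountPath: /downloads
      storageClass: "nfs-client"    # illustrative; omit to use the cluster default
      accessMode: ReadWriteOnce
      size: 1Gi
      # existingClaim: downloads-pvc  # or reuse a pre-created claim
```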

View File

@@ -1,31 +1,21 @@
apiVersion: v2
name: lidarr
appVersion: 0.8.0.1886
description: Looks and smells like Sonarr but made for music
type: application
version: 4.0.2
appVersion: 0.7.1.1785-ls18
name: lidarr
version: 5.0.0
keywords:
- lidarr
- torrent
- usenet
home: https://github.com/k8s-at-home/charts/tree/master/charts/lidarr
icon: https://github.com/lidarr/Lidarr/blob/develop/Logo/512.png?raw=true
sources:
- https://github.com/Lidarr/Lidarr
- https://hub.docker.com/r/linuxserver/lidarr
maintainers:
- name: DirtyCajunRice
email: nick@cajun.pro
url: https://github.com/dirtycajunrice
- name: billimek
email: jeff@billimek.com
dependencies:
- name: media-common
- name: common
repository: https://k8s-at-home.com/charts/
version: ^1.0.0
alias: lidarr
annotations:
artifacthub.io/links: |
- name: App Source
url: https://github.com/Lidarr/Lidarr
- name: Default Docker Image
url: https://hub.docker.com/r/linuxserver/lidarr
artifacthub.io/maintainers: |
- name: Nicholas St. Germain
email: nick@cajun.pro
version: ^1.0.4

View File

@@ -1,4 +1,4 @@
approvers:
- DirtyCajunRice
- billimek
reviewers:
- DirtyCajunRice
- billimek

View File

@@ -1,52 +1,36 @@
# Lidarr | Looks and smells like Sonarr but made for music
Umbrella chart that
* Uses [media-common](https://github.com/k8s-at-home/charts/tree/master/charts/media-common) as a base
* Adds docker image information leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/lidarr/)
* Deploys [Lidarr](https://github.com/lidarr/Lidarr)
# Lidarr
## TL;DR
```console
This is a helm chart for [Lidarr](https://github.com/lidarr/Lidarr).
## TL;DR;
```shell
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm install k8s-at-home/lidarr
```
## Installing the Chart
To install the chart with the release name `lidarr`:
To install the chart with the release name `my-release`:
```console
helm install lidarr k8s-at-home/lidarr
helm install --name my-release k8s-at-home/lidarr
```
## Upgrading
Chart versions before 4.0.0 did not use media-common. Upgrading will require you to nest your values.yaml file under
a top-level `lidarr:` key.
Chart versions 1.0.1 and earlier used separate PVCs for Downloads and Music. This presented an issue where Lidarr would
be unable to hard-link files between the /downloads and /music directories when importing media. This is caused because
each PVC exposed to the pod as a separate filesystem. It resulted in Lidarr copying files rather than linking;
using additional storage without the user's knowledge.
This chart now uses a single PVC for Downloads and Music. This means all of your media (and downloads) must be in, or
be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following:
1. [Uninstall](#uninstalling-the-chart) your current release
2. On your backing store, organize your media, ie. media/music, media/downloads
3. If using a pre-existing PVC, create a single new PVC for all of your media
4. Refer to the [configuration](#configuration) for updates to the chart values
5. Re-install the chart
6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Lidarr's
`Mass Editor` under the `Library` tab. Simply select all artists in your library, and use the editor to change the
`Root Folder` and hit save.
## Uninstalling the Chart
To uninstall the `lidarr` deployment:
To uninstall/delete the `my-release` deployment:
```console
helm uninstall lidarr
helm delete my-release --purge
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml)
Read through the chart's [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/lidarr/values.yaml)
file. It has several commented out suggested values.
Additionally you can take a look at the common library [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/common/values.yaml) for more (advanced) configuration options.
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
@@ -60,11 +44,9 @@ chart. For example,
helm install lidarr k8s-at-home/lidarr --values values.yaml
```
These values will be nested as it is a dependency, for example
```yaml
lidarr:
image:
tag: ...
image:
tag: ...
```
---
@@ -74,6 +56,21 @@ If you get
```console
Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...`
```
it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use`existingClaim`.
it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use `existingClaim`.
---
## Upgrading an existing Release to a new major version
A major chart version change (like 4.0.1 -> 5.0.0) indicates that there is an incompatible breaking change potentially needing manual actions.
### Upgrading from 4.x.x to 5.x.x
Due to migrating to a centralized common library some values in `values.yaml` have changed.
Examples:
* `service.port` has been moved to `service.port.port`.
* `persistence.type` has been moved to `controllerType`.
Refer to the library values.yaml for more configuration options.

View File

@@ -0,0 +1,2 @@
ingress:
enabled: true

View File

@@ -0,0 +1 @@
{{- include "common.notes.defaultNotes" . -}}

View File

@@ -0,0 +1 @@
{{ include "common.all" . }}

View File

@@ -1,10 +1,37 @@
# Default values for lidarr.
# Default values for Lidarr.
lidarr:
image:
organization: linuxserver
repository: lidarr
pullPolicy: IfNotPresent
tag: 0.7.1.1785-ls18
service:
image:
repository: linuxserver/lidarr
pullPolicy: IfNotPresent
tag: version-0.8.0.1886
service:
port:
port: 8686
env: {}
# TZ: UTC
# PUID: 1001
# PGID: 1001
persistence:
config:
enabled: true
emptyDir: true
media:
enabled: true
emptyDir: true
mountPath: /media
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
# storageClass: "-"
# accessMode: ReadWriteOnce
# size: 1Gi
## Do not delete the pvc upon helm uninstall
# skipuninstall: false
# existingClaim: ""

View File

@@ -2,7 +2,7 @@ apiVersion: v1
appVersion: "1.6.12"
description: Eclipse Mosquitto - An open source MQTT broker
name: mosquitto
version: 0.4.0
version: 0.5.0
keywords:
- message queue
- MQTT

View File

@@ -0,0 +1,3 @@
monitoring:
sidecar:
enabled: true

View File

@@ -0,0 +1,40 @@
{{- if .Values.monitoring.podMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
labels:
{{ include "mosquitto.labels" . | indent 4 }}
{{- if .Values.monitoring.podMonitor.labels }}
{{ toYaml .Values.monitoring.podMonitor.labels }}
{{- end }}
name: {{ template "mosquitto.fullname" . }}-prometheus-exporter
{{- if .Values.monitoring.podMonitor.namespace }}
namespace: {{ .Values.monitoring.podMonitor.namespace }}
{{- end }}
spec:
podMetricsEndpoints:
- port: prometheus
path: /metrics
{{- if .Values.monitoring.podMonitor.interval }}
interval: {{ .Values.monitoring.podMonitor.interval }}
{{- end }}
{{- if .Values.monitoring.podMonitor.bearerTokenFile }}
bearerTokenFile: {{ .Values.monitoring.podMonitor.bearerTokenFile }}
{{- end }}
{{- if .Values.monitoring.podMonitor.bearerTokenSecret }}
bearerTokenSecret:
name: {{ .Values.monitoring.podMonitor.bearerTokenSecret.name }}
key: {{ .Values.monitoring.podMonitor.bearerTokenSecret.key }}
{{- if .Values.monitoring.podMonitor.bearerTokenSecret.optional }}
optional: {{ .Values.monitoring.podMonitor.bearerTokenSecret.optional }}
{{- end }}
{{- end }}
jobLabel: {{ template "mosquitto.fullname" . }}-prometheus-exporter
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "mosquitto.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@@ -25,6 +25,12 @@ spec:
targetPort: websocket
protocol: TCP
name: websocket
{{- if .Values.monitoring.sidecar.enabled }}
- port: {{ .Values.monitoring.sidecar.port }}
targetPort: prometheus
protocol: TCP
name: prometheus
{{- end }}
selector:
app.kubernetes.io/name: {{ include "mosquitto.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -25,6 +25,23 @@ spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
{{- if .Values.monitoring.sidecar.enabled }}
- name: exporter
image: "{{ .Values.monitoring.sidecar.image.repository }}:{{ .Values.monitoring.sidecar.image.tag }}"
imagePullPolicy: {{ .Values.monitoring.sidecar.image.pullPolicy }}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
args:
{{ toYaml .Values.monitoring.sidecar.args | indent 12 }}
env:
{{ toYaml .Values.monitoring.sidecar.envs | indent 12 }}
resources:
{{ toYaml .Values.monitoring.sidecar.resources | indent 12 }}
ports:
- containerPort: {{ .Values.monitoring.sidecar.port }}
name: prometheus
protocol: TCP
{{- end }}
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}

View File

@@ -86,3 +86,30 @@ extraVolumes: []
extraVolumeMounts: []
# - name: example-name
# mountPath: /path/in/container
monitoring:
podMonitor:
enabled: false
sidecar:
enabled: false
port: 9234
args:
- "--use-splitted-config"
envs:
- name: MQTT_CLIENT_ID
value: exporter
- name: BROKER_HOST
valueFrom:
fieldRef:
fieldPath: status.podIP
image:
repository: nolte/mosquitto-exporter
tag: v0.6.3
pullPolicy: IfNotPresent
resources:
limits:
cpu: 300m
memory: 128Mi
requests:
cpu: 100m
memory: 64Mi
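To exercise both additions together, a release override might enable the exporter sidecar and the PodMonitor at once (a sketch assuming a Prometheus Operator install that discovers PodMonitors; the `release` label is illustrative):

```yaml
monitoring:
  sidecar:
    enabled: true            # adds the nolte/mosquitto-exporter container and its `prometheus` port
  podMonitor:
    enabled: true            # renders the PodMonitor template above
    interval: 30s
    labels:
      release: prometheus    # match your operator's podMonitor selector, if it uses one
```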

charts/radarr/.helmignore Normal file
View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS

View File

@@ -1,31 +1,21 @@
apiVersion: v2
name: radarr
appVersion: 3.0.0.3989
description: A fork of Sonarr to work with movies à la Couchpotato
type: application
version: 6.0.2
appVersion: 3.0.0.3591
name: radarr
version: 7.0.0
keywords:
- radarr
- torrent
- usenet
home: https://github.com/k8s-at-home/charts/tree/master/charts/radarr
icon: https://github.com/Radarr/Radarr/blob/aphrodite/Logo/512.png?raw=true
sources:
- https://github.com/Radarr/Radarr
- https://hub.docker.com/r/linuxserver/radarr
maintainers:
- name: DirtyCajunRice
email: nick@cajun.pro
url: https://github.com/dirtycajunrice
- name: billimek
email: jeff@billimek.com
dependencies:
- name: media-common
- name: common
repository: https://k8s-at-home.com/charts/
version: ^1.0.0
alias: radarr
annotations:
artifacthub.io/links: |
- name: App Source
url: https://github.com/Radarr/Radarr
- name: Default Docker Image
url: https://hub.docker.com/r/linuxserver/radarr
artifacthub.io/maintainers: |
- name: Nicholas St. Germain
email: nick@cajun.pro
version: ^1.0.4

View File

@@ -1,4 +1,4 @@
approvers:
- DirtyCajunRice
- billimek
reviewers:
- DirtyCajunRice
- billimek

View File

@@ -1,52 +1,36 @@
# Radarr | A fork of Sonarr to work with movies à la Couchpotato
Umbrella chart that
* Uses [media-common](https://github.com/k8s-at-home/charts/tree/master/charts/media-common) as a base
* Adds docker image information leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/radarr/)
* Deploys [Radarr](https://github.com/Radarr/Radarr)
# Radarr
## TL;DR
```console
This is a helm chart for [Radarr](https://github.com/Radarr/Radarr).
## TL;DR;
```shell
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm install k8s-at-home/radarr
```
## Installing the Chart
To install the chart with the release name `radarr`:
To install the chart with the release name `my-release`:
```console
helm install radarr k8s-at-home/radarr
helm install --name my-release k8s-at-home/radarr
```
## Upgrading
Chart versions before 6.0.0 did not use media-common. Upgrading will require you to nest your values.yaml file under
a top-level `radarr:` key.
Chart versions 3.2.0 and earlier used separate PVCs for Downloads and Movies. This presented an issue where Radarr would
be unable to hard-link files between the /downloads and /movies directories when importing media. This is caused because
each PVC exposed to the pod as a separate filesystem. It resulted in Radarr copying files rather than linking;
using additional storage without the user's knowledge.
This chart now uses a single PVC for Downloads and Movies. This means all of your media (and downloads) must be in, or
be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following:
1. [Uninstall](#uninstalling-the-chart) your current release
2. On your backing store, organize your media, ie. media/movies, media/downloads
3. If using a pre-existing PVC, create a single new PVC for all of your media
4. Refer to the [configuration](#configuration) for updates to the chart values
5. Re-install the chart
6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Radarr's
`Movie Editor` under the `Movies` tab. Simply select all artists in your library, and use the editor to change the
`Root Folder` and hit save.
## Uninstalling the Chart
To uninstall the `radarr` deployment:
To uninstall/delete the `my-release` deployment:
```console
helm uninstall radarr
helm delete my-release --purge
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml)
Read through the chart's [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/radarr/values.yaml)
file. It has several commented out suggested values.
Additionally you can take a look at the common library [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/common/values.yaml) for more (advanced) configuration options.
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
@@ -60,11 +44,9 @@ chart. For example,
helm install radarr k8s-at-home/radarr --values values.yaml
```
These values will be nested as it is a dependency, for example
```yaml
radarr:
image:
tag: ...
image:
tag: ...
```
---
@@ -77,3 +59,18 @@ Error: rendered manifests contain a resource that already exists. Unable to cont
it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use `existingClaim`.
---
## Upgrading an existing Release to a new major version
A major chart version change (like 4.0.1 -> 5.0.0) indicates that there is an incompatible breaking change potentially needing manual actions.
### Upgrading from 6.x.x to 7.x.x
Due to migrating to a centralized common library some values in `values.yaml` have changed.
Examples:
* `service.port` has been moved to `service.port.port`.
* `persistence.type` has been moved to `controllerType`.
Refer to the library values.yaml for more configuration options.

View File

@@ -0,0 +1,2 @@
ingress:
enabled: true

View File

@@ -0,0 +1 @@
{{- include "common.notes.defaultNotes" . -}}

View File

@@ -0,0 +1 @@
{{ include "common.all" . }}

View File

@@ -1,10 +1,37 @@
# Default values for radarr.
# Default values for Radarr.
radarr:
image:
organization: linuxserver
repository: radarr
pullPolicy: IfNotPresent
tag: 3.0.0.3624-ls21
service:
image:
repository: linuxserver/radarr
pullPolicy: IfNotPresent
tag: version-3.0.0.3989
service:
port:
port: 7878
env: {}
# TZ: UTC
# PUID: 1001
# PGID: 1001
persistence:
config:
enabled: true
emptyDir: true
media:
enabled: true
emptyDir: true
mountPath: /media
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
# storageClass: "-"
# accessMode: ReadWriteOnce
# size: 1Gi
## Do not delete the pvc upon helm uninstall
# skipuninstall: false
# existingClaim: ""

View File

@@ -1,31 +1,21 @@
apiVersion: v2
name: sonarr
appVersion: 3.0.4.993
description: Smart PVR for newsgroup and bittorrent users
type: application
version: 6.0.2
appVersion: 3.0.3.913
name: sonarr
version: 7.0.1
keywords:
- sonarr
- torrent
- usenet
home: https://github.com/k8s-at-home/charts/tree/master/charts/media-common/sonarr
icon: https://github.com/Sonarr/Sonarr/blob/phantom-develop/Logo/512.png?raw=true
sources:
- https://github.com/Sonarr/Sonarr
- https://hub.docker.com/r/linuxserver/sonarr
maintainers:
- name: DirtyCajunRice
email: nick@cajun.pro
url: https://github.com/dirtycajunrice
- name: billimek
email: jeff@billimek.com
dependencies:
- name: media-common
- name: common
repository: https://k8s-at-home.com/charts/
version: ^1.0.0
alias: sonarr
annotations:
artifacthub.io/links: |
- name: App Source
url: https://github.com/Sonarr/Sonarr
- name: Default Docker Image
url: https://hub.docker.com/r/linuxserver/sonarr
artifacthub.io/maintainers: |
- name: Nicholas St. Germain
email: nick@cajun.pro
version: ^1.0.4

View File

@@ -1,4 +1,4 @@
approvers:
- DirtyCajunRice
- billimek
reviewers:
- DirtyCajunRice
- billimek

View File

@@ -1,52 +1,36 @@
# Sonarr | Smart PVR for newsgroup and bittorrent users
Umbrella chart that
* Uses [media-common](https://github.com/k8s-at-home/charts/tree/master/charts/media-common) as a base
* Adds docker image information leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/sonarr/)
* Deploys [Sonarr](https://github.com/sonarr/Sonarr)
# Sonarr
## TL;DR
```console
This is a helm chart for [Sonarr](https://github.com/Sonarr/Sonarr).
## TL;DR;
```shell
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm install k8s-at-home/sonarr
```
## Installing the Chart
To install the chart with the release name `sonarr`:
To install the chart with the release name `my-release`:
```console
helm install sonarr k8s-at-home/sonarr
helm install --name my-release k8s-at-home/sonarr
```
## Upgrading
Chart versions before 6.0.0 did not use media-common. Upgrading will require you to nest your values.yaml file under
a top-level `sonarr:` key.
Chart versions 3.2.0 and earlier used separate PVCs for Downloads and TV. This presented an issue where Sonarr would
be unable to hard-link files between the /downloads and /tv directories when importing media. This is caused because
each PVC exposed to the pod as a separate filesystem. It resulted in Sonarr copying files rather than linking; using
additional storage without the user's knowledge.
This chart now uses a single PVC for Downloads and TV. This means all of your media (and downloads) must be in, or
be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following:
1. [Uninstall](#uninstalling-the-chart) your current release
2. On your backing store, organize your media, ie. media/tv, media/downloads
3. If using a pre-existing PVC, create a single new PVC for all of your media
4. Refer to the [configuration](#configuration) for updates to the chart values
5. Re-install the chart
6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Sonarr's
`Series Editor` under the `Series` tab. Simply select all series in your library, and use the editor to change the
`Root Folder` and hit save.
## Uninstalling the Chart
To uninstall the `sonarr` deployment:
To uninstall/delete the `my-release` deployment:
```console
helm uninstall sonarr
helm delete my-release --purge
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
Read through the media-common [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/media-common/values.yaml)
Read through the chart's [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/sonarr/values.yaml)
file. It has several commented out suggested values.
Additionally you can take a look at the common library [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/common/values.yaml) for more (advanced) configuration options.
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
@@ -54,17 +38,15 @@ helm install sonarr \
--set sonarr.env.TZ="America/New York" \
k8s-at-home/sonarr
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the
chart. For example,
```console
helm install sonarr k8s-at-home/sonarr --values values.yaml
```
These values will be nested as it is a dependency, for example
```yaml
sonarr:
image:
tag: ...
image:
tag: ...
```
---
@@ -74,7 +56,21 @@ If you get
```console
Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...`
```
it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use
`existingClaim`.
it may be because you uninstalled the chart with `skipuninstall` enabled, you need to manually delete the pvc or use `existingClaim`.
---
## Upgrading an existing Release to a new major version
A major chart version change (like 4.0.1 -> 5.0.0) indicates that there is an incompatible breaking change potentially needing manual actions.
### Upgrading from 6.x.x to 7.x.x
Due to migrating to a centralized common library some values in `values.yaml` have changed.
Examples:
* `service.port` has been moved to `service.port.port`.
* `persistence.type` has been moved to `controllerType`.
Refer to the library values.yaml for more configuration options.

View File

@@ -0,0 +1,2 @@
ingress:
enabled: true

View File

@@ -0,0 +1 @@
{{- include "common.notes.defaultNotes" . -}}

View File

@@ -0,0 +1 @@
{{ include "common.all" . }}

View File

@@ -1,10 +1,37 @@
# Default values for sonarr.
# Default values for Sonarr.
sonarr:
image:
organization: linuxserver
repository: sonarr
pullPolicy: IfNotPresent
tag: 3.0.3.913-ls40
service:
image:
repository: linuxserver/sonarr
pullPolicy: IfNotPresent
tag: version-3.0.4.993
service:
port:
port: 8989
env: {}
# TZ: UTC
# PUID: 1001
# PGID: 1001
persistence:
config:
enabled: true
emptyDir: true
media:
enabled: true
emptyDir: true
mountPath: /media
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
# storageClass: "-"
# accessMode: ReadWriteOnce
# size: 1Gi
## Do not delete the pvc upon helm uninstall
# skipuninstall: false
# existingClaim: ""