---
# Helm values for the nextcloud/helm chart (deployment at nextcloud.immich-ad.ovh).
# NOTE(review): this file was reconstructed from a whitespace-collapsed copy; key
# nesting follows the upstream chart schemas — verify against `helm show values`.
global:
  image:
    # -- if set it will overwrite all registry entries
    registry:
  security:
    # required for bitnamilegacy repos
    allowInsecureImages: true

## ref: https://hub.docker.com/r/library/nextcloud/tags/
##
image:
  registry: docker.io
  repository: library/nextcloud
  flavor: apache
  # default is generated by flavor and appVersion
  tag: 33.0.1-apache
  pullPolicy: IfNotPresent
  # pullSecrets:
  #   - myRegistrKeySecretName

nameOverride: ""
fullnameOverride: ""
podAnnotations: {}
podLabels: {}
deploymentAnnotations: {}
deploymentLabels: {}

# Number of replicas to be deployed
replicaCount: 1

## Allowing use of ingress controllers
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
  enabled: true
  className: traefik
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/proxy-body-size: "0"
    # HSTS
    traefik.ingress.kubernetes.io/headers.customResponseHeaders.Strict-Transport-Security: "max-age=15552000; includeSubDomains; preload"
  hosts:
    - host: nextcloud.immich-ad.ovh
      paths:
        - path: /
          pathType: Prefix
  tls:
    - hosts:
        - nextcloud.immich-ad.ovh
      secretName: nextcloud-tls
  labels: {}
  path: /
  pathType: Prefix

# Allow configuration of lifecycle hooks
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
lifecycle: {}
# lifecycle:
#   postStartCommand: []
#   preStopCommand: []

phpClientHttpsFix:
  enabled: false
  protocol: https

nextcloud:
  host: nextcloud.immich-ad.ovh
  username: admin
  # NOTE(review): plaintext admin credentials — prefer existingSecret below.
  password: changeme
  ## Use an existing secret
  existingSecret:
    enabled: false
    # secretName: nameofsecret
    usernameKey: nextcloud-username
    passwordKey: nextcloud-password
    tokenKey: ""
    smtpUsernameKey: smtp-username
    smtpPasswordKey: smtp-password
    smtpHostKey: smtp-host
  update: 0
  # If web server is not binding default port, you can define it
  containerPort: 80
  datadir: /var/www/html/data
  persistence:
    subPath:
  # if set, we'll template this list to the NEXTCLOUD_TRUSTED_DOMAINS env var
  trustedDomains: ["nextcloud.nextcloud.svc.cluster.local", "nextcloud.immich-ad.ovh", "nextcloud", "localhost"]

  ## SMTP configuration
  mail:
    enabled: true
    # the user we send email as
    fromAddress: admin
    # the domain we send email from
    domain: immich-ad.ovh
    smtp:
      host: ssl0.ovh.net
      secure: starttls
      port: 587
      authtype: LOGIN
      name: 'admin@immich-ad.ovh'
      # NOTE(review): plaintext SMTP password — prefer existingSecret.smtpPasswordKey.
      password: ',3FV\]Knv_AqC'

  ## PHP Configuration files
  # Will be injected in /usr/local/etc/php/conf.d for apache image and in /usr/local/etc/php-fpm.d when nginx.enabled: true
  phpConfigs:
    zzz-memory.ini: |
      memory_limit = 1024M
      max_execution_time = 360
      upload_max_filesize = 2G
      post_max_size = 2G
    opcache.ini: |
      opcache.enable=1
      opcache.memory_consumption=256
      opcache.interned_strings_buffer=32
      opcache.max_accelerated_files=20000
      opcache.revalidate_freq=60
      opcache.save_comments=1
      opcache.fast_shutdown=1

  ## Default config files that utilize environment variables:
  # see: https://github.com/nextcloud/docker/tree/master#auto-configuration-via-environment-variables
  # IMPORTANT: Will be used only if you put extra configs, otherwise default will come from nextcloud itself
  # Default configurations can be found here: https://github.com/nextcloud/docker/tree/master/.config
  defaultConfigs:
    # To protect /var/www/html/config
    .htaccess: true
    # Apache configuration for rewrite urls
    apache-pretty-urls.config.php: true
    # Define APCu as local cache
    apcu.config.php: true
    # Apps directory configs
    apps.config.php: true
    # Used for auto configure database
    autoconfig.php: true
    # Redis default configuration
    # NOTE(review): the leading "<?php / $CONFIG = [" header and the first key were
    # truncated in the source copy; restored below — confirm 'memcache.distributed'
    # is the intended key for the '\OC\Memcache\Redis' value.
    redis.config.php: |-
      <?php
      $CONFIG = [
        'memcache.distributed' => '\OC\Memcache\Redis',
        'memcache.local' => '\OC\Memcache\APCu',
        'redis' => [
          'host' => 'nextcloud-redis-master',
          'port' => 6379,
          'password' => 'StrongRedisPass',
          'timeout' => 1.5,
        ],
      ];
    # Reverse proxy default configuration
    reverse-proxy.config.php: true
    # S3 Object Storage as primary storage
    s3.config.php: true
    # SMTP default configuration via environment variables
    smtp.config.php: true
    # Swift Object Storage as primary storage
    swift.config.php: true
    # disables the web based updater as the default nextcloud docker image does not support it
    upgrade-disable-web.config.php: true
    # -- imaginary support config
    imaginary.config.php: false

  # Extra config files created in /var/www/html/config/
  # ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
  configs:
    # NOTE(review): the "<?php / $CONFIG = array (" header and the first key were
    # truncated in the source copy; restored below — confirm 'log_type_audit' is
    # the intended key for the 'syslog' value.
    audit.config.php: |-
      <?php
      $CONFIG = array (
        'log_type_audit' => 'syslog',
        'syslog_tag_audit' => 'Nextcloud',
        'logfile_audit' => '',
      );

  # For example, to enable image and text file previews:
  # previews.config.php: |-
  #   <?php
  #   $CONFIG = array (
  #     'enable_previews' => true,
  #     'enabledPreviewProviders' => array (
  #       'OC\Preview\Movie',
  #       'OC\Preview\PNG',
  #       'OC\Preview\JPEG',
  #       'OC\Preview\GIF',
  #       'OC\Preview\BMP',
  #       'OC\Preview\XBitmap',
  #       'OC\Preview\MP3',
  #       'OC\Preview\MP4',
  #       'OC\Preview\TXT',
  #       'OC\Preview\MarkDown',
  #       'OC\Preview\PDF'
  #     ),
  #   );

  # Hooks for auto configuration
  # Here you could write small scripts which are placed in `/docker-entrypoint-hooks.d/<hook-name>/helm.sh`
  # ref: https://github.com/nextcloud/docker?tab=readme-ov-file#auto-configuration-via-hook-folders
  hooks:
    pre-installation:
    post-installation:
    pre-upgrade:
    post-upgrade:
    before-starting:

  ## Strategy used to replace old pods
  ## IMPORTANT: use with care, it is suggested to leave as that for upgrade purposes
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
  strategy:
    type: Recreate
    # type: RollingUpdate
    # rollingUpdate:
    #   maxSurge: 1
    #   maxUnavailable: 0

  ##
  ## Extra environment variables
  extraEnv:
    - name: OVERWRITEPROTOCOL
      value: https
    - name: OVERWRITECLIURL
      value: https://nextcloud.immich-ad.ovh
    - name: TRUSTED_PROXIES
      value: "10.244.0.0/16"

  # Extra init containers that runs before pods start.
  extraInitContainers: []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']

  # Extra sidecar containers.
  extraSidecarContainers: []
  # - name: nextcloud-logger
  #   image: busybox
  #   command: [/bin/sh, -c, 'while ! test -f "/run/nextcloud/data/nextcloud.log"; do sleep 1; done; tail -n+1 -f /run/nextcloud/data/nextcloud.log']
  #   volumeMounts:
  #     - name: nextcloud-data
  #       mountPath: /run/nextcloud/data

  # Extra mounts for the pods. Example shown is for connecting a legacy NFS volume
  # to NextCloud pods in Kubernetes. This can then be configured in External Storage
  extraVolumes:
  # - name: nfs
  #   nfs:
  #     server: "10.0.0.1"
  #     path: "/nextcloud_data"
  #     readOnly: false
  extraVolumeMounts:
  # - name: nfs
  #   mountPath: "/legacy_data"

  # Set securityContext parameters for the nextcloud CONTAINER only (will not affect nginx container).
  # For example, you may need to define runAsNonRoot directive
  securityContext: {}
  # runAsUser: 33
  # runAsGroup: 33
  # runAsNonRoot: true
  # readOnlyRootFilesystem: false

  # Set securityContext parameters for the entire pod. For example, you may need to define runAsNonRoot directive
  podSecurityContext: {}
  # runAsUser: 33
  # runAsGroup: 33
  # runAsNonRoot: true
  # readOnlyRootFilesystem: false

  # Settings for the MariaDB init container
  mariaDbInitContainer:
    resources: {}
    # Set mariadb initContainer securityContext parameters. For example, you may need to define runAsNonRoot directive
    securityContext: {}

  # Settings for the PostgreSQL init container
  postgreSqlInitContainer:
    resources: {}
    # Set postgresql initContainer securityContext parameters. For example, you may need to define runAsNonRoot directive
    securityContext: {}

  # -- priority class for nextcloud.
  # Overrides .Values.priorityClassName
  priorityClassName: ""

##
## External database configuration
##
externalDatabase:
  enabled: true
  type: postgresql
  # service name of subchart (default)
  host: nextcloud-postgresql
  # user: nextcloud
  # database: nextcloud
  # password: "MyStrongPass123"
  existingSecret:
    enabled: true
    secretName: nextcloud-db
    passwordKey: password

##
## PostgreSQL chart configuration
## for more options see https://github.com/bitnami/charts/tree/main/bitnami/postgresql
##
postgresql:
  enabled: true
  image:
    registry: docker.io
    repository: bitnamilegacy/postgresql
  global:
    postgresql:
    # global.postgresql.auth overrides postgresql.auth
    # auth:
    #   username: nextcloud
    #   password: "MyStrongPass123"
    #   database: nextcloud
  auth:
    # username: nextcloud
    # database: nextcloud
    existingSecret: nextcloud-postgresql
  primary:
    resources:
      requests:
        memory: 512Mi
      limits:
        memory: 1Gi
    persistence:
      enabled: true
      # Use an existing Persistent Volume Claim (must be created ahead of time)
      existingClaim: pvc-nextcloud-postgres
      storageClass: nextcloud-postgres-storage

##
## Collabora chart configuration
## for more options see https://github.com/CollaboraOnline/online/tree/master/kubernetes/helm/collabora-online
##
collabora:
  enabled: true
  # url in admin should be: https://collabora.immich-ad.ovh
  collabora:
    ## HTTPS nextcloud domain, if needed
    aliasgroups:
      - host: https://nextcloud.immich-ad.ovh:443
    securityContext:
      privileged: true
    env:
      # We terminate TLS at Traefik, so Collabora must not try to do HTTPS itself
      - name: DONT_GEN_SSL_CERT
        value: "true"
      # Tell Collabora which Nextcloud URL is allowed to use it
      - name: aliasgroup1
        value: https://nextcloud.immich-ad.ovh:443
    # set extra parameters for collabora
    # you may need to add --o:ssl.termination=true
    extra_params: >
      --o:ssl.enable=false
      --o:ssl.termination=true
    ## Specify server_name when the hostname is not reachable directly for
    # example behind reverse-proxy. example: collabora.domain
    server_name: null
    existingSecret:
      # set to true to get collabora admin credentials from an existing secret
      # if set, ignores collabora.collabora.username and password
      enabled: false
      # name of existing Kubernetes Secret with collabora admin credentials
      secretName: ""
      usernameKey: "username"
      passwordKey: "password"
    # setup admin login credentials, these are ignored if
    # collabora.collabora.existingSecret.enabled=true
    password: examplepass
    username: admin
  # setup ingress
  ingress:
    enabled: true
    className: traefik
    annotations:
      cert-manager.io/cluster-issuer: letsencrypt-prod
      traefik.ingress.kubernetes.io/router.entrypoints: websecure
      traefik.ingress.kubernetes.io/proxy-body-size: "0"
      traefik.ingress.kubernetes.io/router.tls: "true"
    hosts:
      - host: collabora.immich-ad.ovh
        paths:
          - path: /
            pathType: Prefix
    tls:
      - hosts:
          - collabora.immich-ad.ovh
        secretName: collabora-tls
  # see collabora helm README.md for recommended values
  resources: {}
  readinessProbe:
    enabled: true
    path: /hosting/discovery
    port: 9980
    scheme: HTTP
    initialDelaySeconds: 40
    periodSeconds: 20
    timeoutSeconds: 5
    failureThreshold: 6
  livenessProbe:
    enabled: true
    path: /hosting/discovery
    port: 9980
    scheme: HTTP
    initialDelaySeconds: 60

## Cronjob to execute Nextcloud background tasks
## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#cron
##
cronjob:
  enabled: true
  # Either 'sidecar' or 'cronjob'
  type: sidecar
  # Runs crond as a sidecar container in the Nextcloud pod
  # Note: crond requires root
  sidecar:
    ## Cronjob sidecar resource requests and limits
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    resources: {}
    # Allow configuration of lifecycle hooks
    # ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
    lifecycle: {}
    # lifecycle:
    #   postStartCommand: []
    #   preStopCommand: []
    # Set securityContext parameters. For example, you may need to define runAsNonRoot directive
    securityContext: {}
    # runAsUser: 33
    # runAsGroup: 33
    # runAsNonRoot: true
    # readOnlyRootFilesystem: true
    # The command the cronjob container executes.
    command:
      - /cron.sh
  # Uses a Kubernetes CronJob to execute the Nextcloud cron tasks
  # Note: can run as non-root user. Should run as same user as the Nextcloud pod.
  cronjob:
    # Use a CronJob instead of crond sidecar container
    # crond does not work when not running as root user
    # Note: requires `persistence.enabled=true`
    schedule: "*/5 * * * *"
    successfulJobsHistoryLimit: 3
    failedJobsHistoryLimit: 5
    # -- Additional labels for cronjob
    labels: {}
    # -- Additional labels for cronjob pod
    podLabels: {}
    annotations: {}
    backoffLimit: 1
    affinity: {}
    # Often RWO volumes are used. But the cronjob pod needs access to the same volume as the nextcloud pod.
    # Depending on your provider two pods on the same node can still access the same volume.
    # Following config ensures that the cronjob pod is scheduled on the same node as the nextcloud pod.
    # affinity:
    #   podAffinity:
    #     requiredDuringSchedulingIgnoredDuringExecution:
    #       - labelSelector:
    #           matchExpressions:
    #             - key: app.kubernetes.io/name
    #               operator: In
    #               values:
    #                 - nextcloud
    #             - key: app.kubernetes.io/component
    #               operator: In
    #               values:
    #                 - app
    #         topologyKey: kubernetes.io/hostname
    ## Resource requests and limits
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    resources: {}
    # -- priority class for the cron job.
    # Overrides .Values.priorityClassName
    priorityClassName: ""
    # Allow configuration of lifecycle hooks
    # ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
    # Set securityContext parameters. For example, you may need to define runAsNonRoot directive
    securityContext: {}
    # runAsUser: 33
    # runAsGroup: 33
    # runAsNonRoot: true
    # readOnlyRootFilesystem: true
    # The command to run in the cronjob container
    # Example to increase memory limit: php -d memory_limit=2G ...
    command:
      - php
      - -f
      - /var/www/html/cron.php
      - --
      - --verbose

service:
  type: ClusterIP
  port: 8080
  loadBalancerIP: ""
  nodePort:
  # -- use additional annotation on service for nextcloud
  annotations: {}
  # -- Set this to "ClientIP" to make sure that connections from the same client
  # are passed to the same Nextcloud pod each time.
  sessionAffinity: ""
  sessionAffinityConfig: {}

## Enable persistence using Persistent Volume Claims
## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
##
persistence:
  # Nextcloud Data (/var/www/html)
  enabled: true
  existingClaim: pvc-nextcloud-data
  storageClass: nextcloud-data
  ## Use an additional pvc for the data directory rather than a subpath of the default PVC
  ## Useful to store data on a different storageClass (e.g. on slower disks)
  nextcloudData:
    enabled: false
    subPath:
    labels: {}
    annotations: {}
    # storageClass: "-"
    # existingClaim:
    accessMode: ReadWriteOnce
    size: 8Gi

redis:
  # NOTE(review): was the YAML 1.1 truthy literal `yes`; canonical boolean used instead.
  enabled: true
  architecture: standalone
  auth:
    enabled: true
    # NOTE(review): plaintext redis password committed to values — consider existingSecret.
    password: "StrongRedisPass"
  master:
    persistence:
      enabled: false
      size: 1Gi

resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# resources:
#   limits:
#     cpu: 100m
#     memory: 128Mi
#   requests:
#     cpu: 100m
#     memory: 128Mi

# -- Priority class for pods. This is the _default_
# priority class for pods created by this deployment - it may be
# overridden by more specific instances of priorityClassName -
# e.g. cronjob.cronjob.priorityClassName
priorityClassName: ""

## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
  enabled: true
  initialDelaySeconds: 30
  periodSeconds: 20
  timeoutSeconds: 5
  failureThreshold: 3
  successThreshold: 1
readinessProbe:
  enabled: true
  initialDelaySeconds: 30
  periodSeconds: 30
  timeoutSeconds: 5
  failureThreshold: 3
  successThreshold: 1
startupProbe:
  enabled: false
  initialDelaySeconds: 50
  periodSeconds: 30
  timeoutSeconds: 5
  failureThreshold: 30
  successThreshold: 1

## Enable pod autoscaling using HorizontalPodAutoscaler
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
##
hpa:
  enabled: false
  cputhreshold: 60
  minPods: 1
  maxPods: 10

nodeSelector: {}

tolerations: []

# -- Nextcloud pod topologySpreadConstraints
topologySpreadConstraints: []

affinity: {}

dnsConfig: {}
# Custom dns config for Nextcloud containers.
# You can for example configure ndots. This may be needed in some clusters with alpine images.
# options:
#   - name: ndots
#     value: "1"

imaginary:
  # -- Start Imaginary
  enabled: false
  # -- Number of imaginary pod replicas to deploy
  replicaCount: 1
  image:
    # -- Imaginary image registry
    registry: docker.io
    # -- Imaginary image name
    repository: h2non/imaginary
    # -- Imaginary image tag
    tag: 1.2.4
    # -- Imaginary image pull policy
    pullPolicy: IfNotPresent
    # -- Imaginary image pull secrets
    pullSecrets: []
  # -- Additional annotations for imaginary
  podAnnotations: {}
  # -- Additional labels for imaginary
  podLabels: {}
  # -- Imaginary pod nodeSelector
  nodeSelector: {}
  # -- Imaginary pod tolerations
  tolerations: []
  # -- Imaginary pod topologySpreadConstraints
  topologySpreadConstraints: []
  # -- imaginary resources
  resources: {}
  # -- priority class for imaginary.
  # Overrides .Values.priorityClassName
  priorityClassName: ""
  # -- Optional security context for the Imaginary container
  securityContext:
    runAsUser: 1000
    runAsNonRoot: true
    # allowPrivilegeEscalation: false
    # capabilities:
    #   drop:
    #     - ALL
  # -- Optional security context for the Imaginary pod (applies to all containers in the pod)
  podSecurityContext: {}
  # runAsNonRoot: true
  # seccompProfile:
  #   type: RuntimeDefault
  readinessProbe:
    enabled: true
    failureThreshold: 3
    successThreshold: 1
    periodSeconds: 10
    timeoutSeconds: 1
  livenessProbe:
    enabled: true
    failureThreshold: 3
    successThreshold: 1
    periodSeconds: 10
    timeoutSeconds: 1
  service:
    # -- Imaginary: Kubernetes Service type
    type: ClusterIP
    # -- Imaginary: LoadBalancerIp for service type LoadBalancer
    loadBalancerIP:
    # -- Imaginary: NodePort for service type NodePort
    nodePort:
    # -- Additional annotations for service imaginary
    annotations: {}
    # -- Additional labels for service imaginary
    labels: {}

## Prometheus Exporter / Metrics
##
metrics:
  enabled: false
  replicaCount: 1
  # Optional: becomes NEXTCLOUD_SERVER env var in the nextcloud-exporter container.
  # Without it, we will use the full name of the nextcloud service
  server: ""
  # The metrics exporter needs to know how you serve Nextcloud either http or https
  https: false
  # Use API token if set, otherwise fall back to password authentication
  # https://github.com/xperimental/nextcloud-exporter#token-authentication
  # Currently you still need to set the token manually in your nextcloud install
  token: ""
  timeout: 5s
  # if set to true, exporter skips certificate verification of Nextcloud server.
  tlsSkipVerify: false
  info:
    # Optional: becomes NEXTCLOUD_INFO_APPS env var in the nextcloud-exporter container.
    # Enables gathering of apps-related metrics. Defaults to false
    apps: false
  update: false
  image:
    registry: docker.io
    repository: xperimental/nextcloud-exporter
    tag: 0.8.0
    pullPolicy: IfNotPresent
    # pullSecrets:
    #   - myRegistrKeySecretName
  ## Metrics exporter resource requests and limits
  ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
  ##
  resources: {}
  # -- Metrics exporter pod Annotation
  podAnnotations: {}
  # -- Metrics exporter pod Labels
  podLabels: {}
  # -- Metrics exporter pod nodeSelector
  nodeSelector: {}
  # -- Metrics exporter pod tolerations
  tolerations: []
  # -- Metrics exporter pod affinity
  affinity: {}
  service:
    type: ClusterIP
    # Use serviceLoadBalancerIP to request a specific static IP,
    # otherwise leave blank
    loadBalancerIP:
    annotations:
      prometheus.io/scrape: "true"
      prometheus.io/port: "9205"
    labels: {}
  # -- security context for the metrics CONTAINER in the pod
  securityContext:
    runAsUser: 1000
    runAsNonRoot: true
    # allowPrivilegeEscalation: false
    # capabilities:
    #   drop:
    #     - ALL
  # -- security context for the metrics POD
  podSecurityContext: {}
  # runAsNonRoot: true
  # seccompProfile:
  #   type: RuntimeDefault
  ## Prometheus Operator ServiceMonitor configuration
  ##
  serviceMonitor:
    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator
    ##
    enabled: false
    ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
    ##
    namespace: ""
    ## @param metrics.serviceMonitor.namespaceSelector The selector of the namespace where the target service is located (defaults to the release namespace)
    namespaceSelector:
    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
    ##
    jobLabel: ""
    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped
    # ref: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint
    ##
    interval: 30s
    ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended
    # ref: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.Endpoint
    ##
    scrapeTimeout: ""
    ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor
    ##
    labels: {}

rules:
  # -- Deploy Prometheus Rules (Alerts) for the exporter
  # @section -- Metrics
  enabled: false
  # -- Label on Prometheus Rules CRD Manifest
  # @section -- Metrics
  labels: {}
  defaults:
    # -- Add Default Rules
    # @section -- Metrics
    enabled: true
    # -- Label on the rules (the severity is already set)
    # @section -- Metrics
    labels: {}
    # -- Filter on metrics on alerts (default just for this helm-chart)
    # @section -- Metrics
    filter: ""
  # -- Add own Rules to Prometheus Rules
  # @section -- Metrics
  additionalRules: []

# -- Allows users to inject additional Kubernetes manifests (YAML) to be rendered with the release.
# Could either be a list or a map
# If a map, each key is the name of the manifest.
# If an array, each item is a manifest, which can be a string (YAML block) or a YAML object.
# Each item should be a string containing valid YAML. Example:
# extraManifests:
#   - |
#     apiVersion: traefik.containo.us/v1alpha1
#     kind: Middleware
#     metadata:
#       name: my-middleware
#     spec:
#       ...
#   - |
#     apiVersion: traefik.containo.us/v1alpha1
#     kind: IngressRoute
#     metadata:
#       name: my-ingressroute
#     spec:
#       ...
# Or as a map:
# extraManifests:
#   my-middleware:
#     apiVersion: traefik.containo.us/v1alpha1
#     kind: Middleware
#     metadata:
#       name: my-middleware
#     spec:
#       ...
#   my-ingressroute:
#     apiVersion: traefik.containo.us/v1alpha1
#     kind: IngressRoute
#     metadata:
#       name: my-ingressroute
#     spec:
#       ...
extraManifests: []