...
 
Commits (4)
---
apiVersion: k8s.beryju.org/v1alpha1
kind: P2
metadata:
......@@ -5,10 +6,13 @@ metadata:
spec:
version: 0.7.6
config:
# Optionally specify fixed secret_key, otherwise generated automatically
# secret_key: _k*@6h2u2@q-dku57hhgzb7tnx*ba9wodcb^s9g0j59@=y(@_o
secret_key: "r-9k#x4tkk2e8%=(9hf#^v4&=5z2)^gzn^)l*_=z+&0a97kwd8"
redis:
password: "ThisIsNotASecurePassword!"
postgresql:
postgresqlPassword: "ThisIsNotASecurePassword!"
config:
# Enable error reporting (errors are sent to sentry.beryju.org)
error_reporting: true
......@@ -26,16 +30,12 @@ spec:
user_url: ""
deployment:
webInstances: 2
webInstances: 1
workerInstances: 1
# To disable tier0, set the values below to 0
tier0Instances: 2
grpcInstances: 1
postgresql:
postgresqlUsername: p2
postgresqlDatabase: p2
ingress:
enabled: true
serve:
......@@ -44,7 +44,7 @@ spec:
hosts:
- "p2.local"
tls:
- secretName: chart-example-tls
- secretName: example-p2-tls
hosts:
- i.p2.local
- p2.local
......
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: p2-fallback-ingress
namespace: prod-p2
spec:
backend:
serviceName: p2-web
servicePort: http
dependencies:
- name: postgresql
version: 3.10.1
version: 6.2.1
repository: https://kubernetes-charts.storage.googleapis.com/
- name: redis
version: 5.1.0
version: 9.0.2
repository: https://kubernetes-charts.storage.googleapis.com/
......@@ -9,9 +9,11 @@ data:
# Set this to true if you only want to use external authentication
P2_EXTERNAL_AUTH_ONLY: {{ .Values.config.external_auth_only | quote }}
# Callback URL: <base url>/_/oidc/callback/
# P2_OIDC__ENABLED: 'false'
# P2_OIDC__CLIENT_ID: ''
# P2_OIDC__CLIENT_SECRET: ''
# P2_OIDC__AUTH_URL: ''
# P2_OIDC__TOKEN_URL: ''
# P2_OIDC__USER_URL: ''
{{- if .Values.config.oidc.enabled -}}
P2_OIDC__ENABLED: 'true'
P2_OIDC__CLIENT_ID: '{{ .Values.config.oidc.client_id }}'
P2_OIDC__CLIENT_SECRET: '{{ .Values.config.oidc.client_secret }}'
P2_OIDC__AUTH_URL: '{{ .Values.config.oidc.auth_url }}'
P2_OIDC__TOKEN_URL: '{{ .Values.config.oidc.token_url }}'
P2_OIDC__USER_URL: '{{ .Values.config.oidc.user_url }}'
{{ end }}
......@@ -24,7 +24,6 @@ spec:
prometheus.io/scrape: "true"
field.cattle.io/workloadMetrics: '[{"path":"/metrics","port":9102,"schema":"HTTP"}]'
spec:
serviceAccountName: {{ include "p2.fullname" . }}-cni
securityContext:
fsGroup: 100
volumes:
......@@ -78,8 +77,8 @@ spec:
name: media-storage
resources:
requests:
cpu: 500m
cpu: 150m
memory: 200M
limits:
cpu: 1000m
cpu: 300m
memory: 250M
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "p2.fullname" . }}-cni
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "p2.fullname" . }}-cni-role
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- list
- apiGroups:
- extensions
- apps
resources:
- deployments
- deployments/scale
verbs:
- get
- list
- patch
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- create
- delete
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- create
- list
- get
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "p2.fullname" . }}-cni
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "p2.fullname" . }}-cni-role
subjects:
- kind: ServiceAccount
name: {{ include "p2.fullname" . }}-cni
......@@ -10,4 +10,4 @@ metadata:
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
type: Opaque
data:
secret_key: {{ randAlphaNum 50 | sha256sum | b64enc | quote }}
secret_key: {{ .Values.secret_key | b64enc | quote }}
......@@ -26,7 +26,7 @@ spec:
spec:
containers:
- name: {{ .Chart.Name }}-static
image: "docker.beryju.org/p2/static:{{ .version }}"
image: "docker.beryju.org/p2/static:{{ .Values.version }}"
imagePullPolicy: IfNotPresent
ports:
- name: http
......
......@@ -24,7 +24,6 @@ spec:
prometheus.io/scrape: "true"
field.cattle.io/workloadMetrics: '[{"path":"/metrics","port":9102,"schema":"HTTP"}]'
spec:
serviceAccountName: {{ include "p2.fullname" . }}-cni
securityContext:
fsGroup: 100
volumes:
......@@ -130,7 +129,7 @@ spec:
value: kubernetes-healthcheck-host
resources:
requests:
cpu: 300m
cpu: 100m
memory: 175M
limits:
cpu: 500m
......
......@@ -21,7 +21,6 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
k8s.p2.io/component: worker
spec:
serviceAccountName: {{ include "p2.fullname" . }}-cni
securityContext:
fsGroup: 100
volumes:
......
version: 0.7.6
config:
# Optionally specify fixed secret_key, otherwise generated automatically
# secret_key: _k*@6h2u2@q-dku57hhgzb7tnx*ba9wodcb^s9g0j59@=y(@_o
# Replace this with your own secret_key (used to sign cookies and others)
secret_key: "y(qs_&z!7u+!7rq6z3fx=p)pfx_ah3l(i&#p(dx5cu=d3=knq3"
config:
# Enable error reporting (errors are sent to sentry.beryju.org)
error_reporting: true
......
......@@ -10,7 +10,6 @@ from rest_framework_jwt.views import (obtain_jwt_token, refresh_jwt_token,
from p2.api.permissions import CustomObjectPermissions
from p2.api.viewsets import APIKeyViewSet, UserViewSet
from p2.core.api.viewsets import BlobViewSet, StorageViewSet, VolumeViewSet
from p2.k8s.api.views import ScaleAPIViewSet
from p2.serve.api.viewsets import ServeRuleViewSet
INFO = openapi.Info(
......@@ -33,7 +32,6 @@ ROUTER.register('core/volume', VolumeViewSet)
ROUTER.register('core/storage', StorageViewSet)
ROUTER.register('system/user', UserViewSet)
ROUTER.register('system/key', APIKeyViewSet)
ROUTER.register('system/scale', ScaleAPIViewSet, basename='k8s')
ROUTER.register('tier0/policy', ServeRuleViewSet)
app_name = 'p2_api'
......
"""p2 k8s api views"""
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from p2.k8s.component_controller import (GRPC_DEPLOYMENT, STATIC_DEPLOYMENT,
TIER0_DEPLOYMENT, WEB_DEPLOYMENT,
WORKER_DEPLOYMENT)
# Maps the public component name (used as the lookup key / API pk) to the
# module-level ComponentController singleton managing that deployment.
CONTROLLER_MAP = {
    'web': WEB_DEPLOYMENT,
    'grpc': GRPC_DEPLOYMENT,
    'static': STATIC_DEPLOYMENT,
    'tier0': TIER0_DEPLOYMENT,
    'worker': WORKER_DEPLOYMENT
}
class ScaleAPIViewSet(ViewSet):
    """Scale various features of p2 Up and Down as well as controlling AutoScaling.

    Admin-only; component names are the keys of CONTROLLER_MAP."""

    permission_classes = [IsAdminUser]

    def list(self, request):
        """Return a list of all manageable components."""
        # dict_keys is not JSON-serializable by DRF's default encoder;
        # materialize it into a list before handing it to Response.
        return Response(list(CONTROLLER_MAP.keys()))

    # pylint: disable=invalid-name
    def retrieve(self, request, pk):
        """Return the current scale (replica count) of the given component.

        Unknown component names yield HTTP 404 instead of an unhandled
        KeyError (which would surface as a 500)."""
        controller = CONTROLLER_MAP.get(pk)
        if controller is None:
            return Response({'detail': 'Unknown component.'}, status=404)
        return Response(controller.scale)
"""p2 K8s App Config"""
from importlib import import_module
from django.apps import AppConfig
class P2K8sConfig(AppConfig):
    """AppConfig for the p2 Kubernetes integration app."""

    name = 'p2.k8s'
    label = 'p2_k8s'
    verbose_name = 'p2 K8s'

    def ready(self):
        super().ready()
        # Import the controller modules purely for their import-time side
        # effects; order matters (component controllers before ingress).
        for controller_module in ('p2.k8s.component_controller',
                                  'p2.k8s.ingress_controller'):
            import_module(controller_module)
"""p2 k8s component controller"""
from math import ceil
from typing import List
from django.conf import settings
from kubernetes.client import AppsV1Api, AutoscalingV2beta2Api
from kubernetes.client.models import (V1ObjectMeta,
V2beta2CrossVersionObjectReference,
V2beta2HorizontalPodAutoscaler,
V2beta2HorizontalPodAutoscalerSpec,
V2beta2MetricSpec, V2beta2MetricTarget,
V2beta2ResourceMetricSource)
from kubernetes.client.rest import ApiException
from structlog import get_logger
from p2.k8s.exceptions import DeploymentNotFound, InvalidDeploymentScale
from p2.k8s.helper import FIELD_MANAGER, MANAGED_BY, APIHelper
LOGGER = get_logger()
# Label key used to select component deployments managed by p2.
DEPLOYMENT_SELECTOR = 'k8s.p2.io/deployment'
class ComponentController(APIHelper):
    """Controls whether a feature is enabled/disabled, the scale of it and
    Horizontal autoscaling.

    The backing Deployment is looked up by label selector at construction
    time; on failure the controller stays in a degraded state (empty
    ``name``, class-level defaults) rather than raising, so the
    module-level singletons below cannot crash application import.
    """

    _apps_client: AppsV1Api = None
    _autoscaling_client: AutoscalingV2beta2Api = None
    _is_optional = False
    # Name of the backing Deployment; empty when lookup failed.
    name = ""
    # Controllers that must be scaled alongside this one.
    dependencies = []
    # NOTE: "ration" is a typo for "ratio"; kept because the name is part
    # of the public constructor signature used by callers below.
    dependency_scale_ration = 1

    def __init__(self, selector,
                 optional=False,
                 dependencies: List['ComponentController'] = None,
                 dependency_scale_ration=1):
        """
        :param selector: value of the deployment label this controller manages
        :param optional: when True, the component may be scaled to 0 replicas
        :param dependencies: controllers scaled alongside this one
        :param dependency_scale_ration: factor applied to the replica count
            when cascading a scale operation to dependencies
        """
        super().__init__()
        self._apps_client = AppsV1Api(self._client)
        self._autoscaling_client = AutoscalingV2beta2Api(self._client)
        self._is_optional = optional
        try:
            self.name = self._find_deployments(selector)
        # Previously only ApiException was caught here, so the
        # DeploymentNotFound raised by _find_deployments escaped the
        # constructor and crashed at import time (the module-level
        # singletons below run this constructor on import).
        except (ApiException, DeploymentNotFound) as exc:
            LOGGER.warning("Failed to find deployment", error=exc)
            return
        self.dependencies = dependencies or []
        self.dependency_scale_ration = dependency_scale_ration

    def _find_deployments(self, selector_value):
        """Find deployment by matching selector and return its name.

        :raises DeploymentNotFound: when no deployment matches the selector
        """
        deployment_list = self._apps_client.list_namespaced_deployment(
            self._namespace, label_selector=f"{DEPLOYMENT_SELECTOR}={selector_value}")
        if not deployment_list.items:
            raise DeploymentNotFound
        deployment = deployment_list.items[0]
        LOGGER.debug("Found deployment for selector",
                     selector=selector_value, deployment=deployment.metadata.name)
        return deployment.metadata.name

    @property
    def scale(self) -> int:
        """Get current scale (spec.replicas) from k8s."""
        return self._apps_client.read_namespaced_deployment_scale(
            self.name, self._namespace, pretty=settings.DEBUG).spec.replicas

    @scale.setter
    def scale(self, replicas: int):
        """Scale deployment, cascading the change to dependencies first.

        :raises InvalidDeploymentScale: when a non-optional component would
            be scaled below 1 replica
        """
        if replicas < 1 and not self._is_optional:
            raise InvalidDeploymentScale
        current = self._apps_client.read_namespaced_deployment_scale(
            self.name, self._namespace, pretty=settings.DEBUG)
        current.spec.replicas = replicas
        # Before we scale, check dependencies and scale those
        for dependency in self.dependencies:
            dependency.scale = ceil(
                replicas * dependency.dependency_scale_ration * self.dependency_scale_ration)
            # Log after the assignment so "Scaled" reflects what happened.
            LOGGER.debug("Scaled dependency", dependency=dependency.name)
        self._apps_client.patch_namespaced_deployment_scale(
            self.name, self._namespace, current, pretty=settings.DEBUG)
        LOGGER.info("Successfully scaled deployment",
                    deployment=self.name,
                    replicas=replicas)

    @property
    def status(self) -> int:
        """Return number of ready (healthy) pods."""
        deployment = self._apps_client.read_namespaced_deployment(self.name, self._namespace)
        return deployment.status.ready_replicas

    def enable_autoscaling(self, min_replicas, max_replicas):
        """Create a HorizontalPodAutoscaler for this deployment, scaling on
        70% average CPU utilization."""
        name = f"{self.name}-hpa"
        hpa = V2beta2HorizontalPodAutoscaler(
            metadata=V1ObjectMeta(
                name=name,
                labels=MANAGED_BY
            ),
            spec=V2beta2HorizontalPodAutoscalerSpec(
                max_replicas=max_replicas,
                min_replicas=min_replicas,
                scale_target_ref=V2beta2CrossVersionObjectReference(
                    kind="Deployment",
                    name=self.name,
                    # Deployments are served by apps/v1; the previous value
                    # "extensions/v1" is not a valid API group/version.
                    api_version="apps/v1"
                ),
                metrics=[
                    V2beta2MetricSpec(
                        type="Resource",
                        resource=V2beta2ResourceMetricSource(
                            name="cpu",
                            target=V2beta2MetricTarget(
                                type="Utilization",
                                average_utilization=70
                            )
                        )
                    )
                ]
            )
        )
        response = self._autoscaling_client.create_namespaced_horizontal_pod_autoscaler(
            self._namespace, hpa, field_manager=FIELD_MANAGER, _preload_content=False)
        LOGGER.debug("Successfully enabled Autoscaling",
                     deployment=self.name,
                     min_replicas=min_replicas,
                     max_replicas=max_replicas)
        return response

    def disable_autoscaling(self):
        """Delete the HorizontalPodAutoscaler created by enable_autoscaling."""
        response = self._autoscaling_client.delete_namespaced_horizontal_pod_autoscaler(
            f"{self.name}-hpa", self._namespace)
        LOGGER.debug("Successfully disabled Autoscaling",
                     deployment=self.name)
        return response
# Module-level controller singletons, created at import time (each
# constructor performs a Kubernetes API lookup for its Deployment).
WEB_DEPLOYMENT = ComponentController("web")
STATIC_DEPLOYMENT = ComponentController("static")
# grpc is scaled at 0.2x of the replica count of whatever depends on it.
GRPC_DEPLOYMENT = ComponentController("grpc", optional=True, dependency_scale_ration=0.2)
# Scaling tier0 cascades to grpc (see ComponentController.scale setter).
TIER0_DEPLOYMENT = ComponentController("tier0", optional=True, dependencies=[GRPC_DEPLOYMENT])
WORKER_DEPLOYMENT = ComponentController("worker")
"""p2 k8s exceptions"""
class K8sException(Exception):
"""Base Exception for all our custom k8s exceptions"""
class InvalidDeploymentScale(K8sException):
"""Deployment cannot be scaled to the requested scale"""
class DeploymentNotFound(K8sException):
"""Deployment not found"""
class DomainAlreadyConfigured(K8sException):
"""Domain is already configured in this ingress"""
"""p2 k8s api"""
from kubernetes.client import ApiClient
from kubernetes.config import load_incluster_config, load_kube_config
from kubernetes.config.config_exception import ConfigException
from structlog import get_logger
from p2.lib.config import CONFIG
LOGGER = get_logger()
# Label applied to every resource created by p2's K8s integration, so
# p2-managed objects can be identified later.
MANAGED_BY = {
    "app.kubernetes.io/managed-by": "k8s.p2.io"
}
# field_manager value passed to create/patch calls for change attribution.
FIELD_MANAGER = 'k8s.p2.io'
# pylint: disable=too-few-public-methods
class APIHelper:
    """Base class with helper methods. Automatically creates client
    and provides current namespace.

    In-cluster, the namespace is read from the mounted service-account
    secret; otherwise it comes from the p2 config key ``k8s_namespace``.
    When no configuration can be loaded, ``_client`` stays None so
    subclasses can degrade gracefully."""

    _namespace = ""
    _client: ApiClient = None

    # Standard mount point of the namespace file inside a pod.
    _SA_NAMESPACE_FILE = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'

    def __init__(self):
        try:
            load_incluster_config()
            # Previously the in-cluster branch never set _namespace, so all
            # *_namespaced_* calls ran with "" when deployed in a cluster.
            try:
                with open(self._SA_NAMESPACE_FILE) as namespace_file:
                    self._namespace = namespace_file.read().strip()
            except OSError:
                LOGGER.warning("Failed to read in-cluster namespace.")
        except ConfigException:
            try:
                load_kube_config()
                self._namespace = CONFIG.y('k8s_namespace')
            # Also catch ConfigException: load_kube_config raises it when no
            # kubeconfig exists; previously that escaped and crashed import.
            except (TypeError, ConfigException):
                LOGGER.warning("Failed to load K8s configuration.")
                return
        self._client = ApiClient()
"""p2 k8s ingress Controller"""
from typing import Optional
from kubernetes.client import CoreV1Api, ExtensionsV1beta1Api
from kubernetes.client.models import (ExtensionsV1beta1HTTPIngressPath,
ExtensionsV1beta1HTTPIngressRuleValue,
ExtensionsV1beta1Ingress,
ExtensionsV1beta1IngressBackend,
ExtensionsV1beta1IngressRule)
from kubernetes.client.rest import ApiException
from structlog import get_logger
from p2.k8s.exceptions import DomainAlreadyConfigured
from p2.k8s.helper import FIELD_MANAGER, APIHelper
LOGGER = get_logger()
# Label selector identifying the single "main" ingress managed by p2.
INGRESS_SELECTOR = "k8s.p2.io/main-ingress=true"
# %-template for the selector used to find a component's Service by name.
SERVICE_SELECTOR = "k8s.p2.io/component=%s"
class IngressController(APIHelper):
    """Add/remove domains from ingress.

    The managed ingress is located at construction time via
    INGRESS_SELECTOR; on failure the controller is left degraded
    (``_name`` stays empty) and only a warning is logged."""

    _extensions_client: ExtensionsV1beta1Api = None
    _core_client: CoreV1Api = None
    # Name of the managed ingress; empty when lookup failed.
    _name = ""

    def __init__(self):
        super().__init__()
        self._extensions_client = ExtensionsV1beta1Api(self._client)
        self._core_client = CoreV1Api(self._client)
        try:
            self._name = self._find_ingress()
        except ApiException as exc:
            LOGGER.warning("Failed to find ingress", error=exc)
            return

    def _find_ingress(self) -> str:
        """Return the name of the first ingress matching INGRESS_SELECTOR.

        NOTE(review): `.items[0]` raises IndexError when nothing matches,
        and the caller only catches ApiException — confirm intended."""
        ingress = self._extensions_client.list_namespaced_ingress(
            self._namespace, label_selector=INGRESS_SELECTOR).items[0]
        return ingress.metadata.name

    @property
    def domains(self):
        """Get list of all domains assigned with this ingress.

        NOTE(review): despite the docstring, this prints each configured
        domain to stdout and returns None — it never builds a list.
        Verify whether callers expect a return value or LOGGER output."""
        ingress = self._extensions_client.read_namespaced_ingress(self._name, self._namespace)
        for rule in ingress.spec.rules:
            for path in rule.http.paths:
                if path.backend.service_name.endswith('tier0'):
                    print(f"Domain {rule.host} configured for tier0")
                elif path.backend.service_name.endswith('web'):
                    print(f"Domain {rule.host} configured for web/s3")

    def _get_service(self, service_label: str) -> Optional[str]:
        """Return the name of the Service carrying the given component
        label, or None when no matching service exists."""
        services = self._core_client.list_namespaced_service(
            self._namespace, label_selector=SERVICE_SELECTOR % service_label)
        if not services.items:
            return None
        return services.items[0].metadata.name

    def _check_if_domain_used(self, domain: str,
                              ingress: ExtensionsV1beta1Ingress, soft_fail=False) -> bool:
        """Check if domain is already used in this ingress.

        Returns True when the domain is free. If Domain is used, and
        soft_fail is set to False (default), a DomainAlreadyConfigured
        error will be raised; with soft_fail=True, False is returned."""
        # Make sure domain is not configured yet
        for rule in ingress.spec.rules:
            if rule.host == domain:
                if soft_fail:
                    LOGGER.info("Domain already configured", domain=domain)
                    return False
                raise DomainAlreadyConfigured
        return True

    def add_tier0_domain(self, domain, soft_fail=False) -> bool:
        """Add domain for use with tier0.

        Appends a rule routing '/' to the tier0 service, then patches the
        ingress. Returns True on success; False when the domain is already
        configured and soft_fail is True."""
        ingress = self._extensions_client.read_namespaced_ingress(self._name, self._namespace)
        if not self._check_if_domain_used(domain, ingress, soft_fail=soft_fail):
            return False
        ingress.spec.rules.append(
            ExtensionsV1beta1IngressRule(
                host=domain,
                http=ExtensionsV1beta1HTTPIngressRuleValue(
                    paths=[
                        ExtensionsV1beta1HTTPIngressPath(
                            backend=ExtensionsV1beta1IngressBackend(
                                service_name=self._get_service('tier0'),
                                service_port='http'),
                            path='/'
                        )
                    ]
                )
            )
        )
        self._extensions_client.patch_namespaced_ingress(
            self._name, self._namespace, ingress, field_manager=FIELD_MANAGER)
        LOGGER.debug("Successfully configured domain for tier0", domain=domain)
        return True

    def add_default_domain(self, domain, soft_fail=False) -> bool:
        """Add domain for use with S3 or web-ui.

        Appends a rule routing '/' to the web service and '/_/static/' to
        the static service, then patches the ingress. Returns True on
        success; False when the domain is already configured and
        soft_fail is True."""
        ingress = self._extensions_client.read_namespaced_ingress(self._name, self._namespace)
        if not self._check_if_domain_used(domain, ingress, soft_fail=soft_fail):
            return False
        ingress.spec.rules.append(
            ExtensionsV1beta1IngressRule(
                host=domain,
                http=ExtensionsV1beta1HTTPIngressRuleValue(
                    paths=[
                        ExtensionsV1beta1HTTPIngressPath(
                            backend=ExtensionsV1beta1IngressBackend(
                                service_name=self._get_service('web'),
                                service_port='http'),
                            path='/'
                        ),
                        ExtensionsV1beta1HTTPIngressPath(
                            backend=ExtensionsV1beta1IngressBackend(
                                service_name=self._get_service('static'),
                                service_port='http'),
                            path='/_/static/'
                        )
                    ]
                )
            )
        )
        self._extensions_client.patch_namespaced_ingress(
            self._name, self._namespace, ingress, field_manager=FIELD_MANAGER)
        LOGGER.debug("Successfully configured domain for general usage.", domain=domain)
        return True


# Module-level singleton; construction performs Kubernetes API calls at
# import time.
INGRESS_CONTROLLER = IngressController()
......@@ -142,7 +142,6 @@ INSTALLED_APPS = [
'p2.serve.apps.P2ServeConfig',
'p2.log.apps.P2LogConfig',
'p2.ui.apps.P2UIConfig',
'p2.k8s.apps.P2K8sConfig',
# p2 - Components
'p2.components.quota.apps.P2QuotaComponentConfig',
'p2.components.image.apps.P2ImageComponentConfig',
......
......@@ -13,7 +13,7 @@
<div class="container">
<div class="row justify-content-center">
<div class="col-6 py-md-3">
{% config 'external_auth_only' as external_auth_only %}
{% config_bool 'external_auth_only' as external_auth_only %}
<div class="card card-signin my-5">
<div class="card-body">
<h5 class="card-title text-center">{% trans "Sign In - p²" %}</h5>
......
......@@ -26,11 +26,19 @@ def model_app(context, expected=''):
return "active" if expected_app == app and expected_model == object_name else ""
return "active" if app == expected else ""
@register.simple_tag
def config(key, default=None):
    """Expose a raw configuration value to templates; `default` is used
    when the key is unset."""
    lookup_result = CONFIG.y(key, default=default)
    return lookup_result
@register.simple_tag
def config_bool(key, default=None):
    """Expose a configuration value to templates, coerced to bool via
    CONFIG.y_bool; `default` is used when the key is unset."""
    boolean_result = CONFIG.y_bool(key, default=default)
    return boolean_result
@register.filter
def model_verbose_name(model):
"""Return model's verbose_name"""
......@@ -38,6 +46,7 @@ def model_verbose_name(model):
model = model.__class__
return model._meta.verbose_name
@register.filter
def get_attribute(blob, path):
"""Access blob.attributes but allow keys like 'site:bytes"""
......@@ -51,6 +60,7 @@ def startswith(text, starts):
return text.startswith(starts)
return False
@register.filter('json')
def json_pretty(obj):
"""Convert obj into pretty-printed JSON"""
......
......@@ -5,8 +5,6 @@ pyyaml
psycopg2
structlog
cherrypy
# K8s integration
kubernetes
# Monitoring
django-prometheus
py-grpc-prometheus
......