@@ -13,11 +13,6 @@ variables:
P2_POSTGRESQL__USER: p2
P2_POSTGRESQL__PASSWORD: "EK-5jnKfjrGRm<77"
cache:
key: ${CI_JOB_STAGE}
paths:
- /cache/
before_script:
# Ensure all dependencies are installed, even those not included in p2/base
- pip install -r requirements.txt
@@ -118,7 +113,7 @@ build-p2-server:
only:
- tags
- /^version/.*$/
build-p2-static:
build-static:
stage: build
image:
name: gcr.io/kaniko-project/executor:debug
@@ -134,7 +129,7 @@ build-p2-static:
services:
- postgres:latest
- redis:latest
build-p2-tier0:
build-tier0:
stage: build
image:
name: gcr.io/kaniko-project/executor:debug
@@ -146,7 +141,7 @@ build-p2-tier0:
only:
- tags
- /^version/.*$/
build-p2-operator:
build-operator:
stage: build
image:
name: gcr.io/kaniko-project/executor:debug
@@ -158,3 +153,15 @@ build-p2-operator:
only:
- tags
- /^version/.*$/
build-docs:
stage: build
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
before_script:
- echo "{\"auths\":{\"docker.beryju.org\":{\"auth\":\"$DOCKER_AUTH\"}}}" > /kaniko/.docker/config.json
script:
- /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/docs/Dockerfile --destination docker.beryju.org/p2/docs:latest --destination docker.beryju.org/p2/docs:0.8.0-rc1
only:
- tags
- /^version/.*$/
@@ -6,6 +6,9 @@ COPY ./requirements.txt /app/
WORKDIR /app/
ENV P2_POSTGRESQL__USER=p2
# CI Password, same as in .gitlab-ci.yml
ENV P2_POSTGRESQL__PASSWORD="EK-5jnKfjrGRm<77"
RUN ./manage.py collectstatic --no-input
FROM nginx:latest
@@ -2,11 +2,16 @@
apiVersion: k8s.beryju.org/v1alpha1
kind: P2
metadata:
# Name of the instance.
name: example-p2
spec:
# Version of p2 to run. Can be set to a static version or `latest`, but downgrades are not supported.
version: 0.8.0rc1
# Secret key used to sign cookies, etc. Should be at least 50 characters long.
# Use something like https://passwordsgenerator.net/ to generate this
secret_key: "r-9k#x4tkk2e8%=(9hf#^v4&=5z2)^gzn^)l*_=z+&0a97kwd8"
# Same goes for these credentials; they should be randomly generated
redis:
password: "ThisIsNotASecurePassword!"
postgresql:
@@ -16,10 +21,11 @@ spec:
# Enable error reporting (errors are sent to sentry.beryju.org)
error_reporting: true
# Only allow single sign-on, configured below
# Only allow single sign-on, disable internal authentication
external_auth_only: false
# OIDC Configuration
# SSO Configuration
# using the OpenID-Connect standard
# Callback URL: <base url>/_/oidc/callback/
oidc:
enabled: false
@@ -29,23 +35,24 @@ spec:
token_url: ""
user_url: ""
# Determines how many instances of each component should be started.
deployment:
webInstances: 1
workerInstances: 1
webInstances: 1 # One web instance per CPU Core
workerInstances: 1 # This depends on your load; 1-2 workers are fine for most installs.
# To disable tier0, set the values below to 0
tier0Instances: 2
grpcInstances: 1
grpcInstances: 1 # One GRPC instance is enough for 8 tier0 instances.
ingress:
enabled: true
serve:
hosts:
- "i.p2.local"
- "i.p2.local" # Domains routed to tier0
hosts:
- "p2.local"
tls:
- secretName: example-p2-tls
hosts:
- i.p2.local
- p2.local
- "p2.local" # Domains routed to p2
- "*.p2.local" # Wildcard is used for domain-style s3 access.
# TLS Configuration is needed if you use cert-manager for example
# tls:
# - secretName: example-p2-tls
# hosts:
# - p2.local
#!/bin/bash
# p2 Install script
# Installs and updates a p2 instance using k3s and docker
P2_VERSION="0.8.0-rc1"
if [ "$EUID" -ne 0 ]; then
  echo "Please run as root"
  exit 1
fi
if ! [ -x "$(command -v curl)" ]; then
  echo 'Error: curl is not installed. Please make sure curl is installed and executable.' >&2
  exit 2
fi
# Create temporary folder and cd to it
TEMP_DIR=$(mktemp -d --suffix _p2)
cd "${TEMP_DIR}"
# kubectl helper functions, from https://github.com/zlabjp/kubernetes-scripts
function __is_pod_ready() {
  [[ "$(kubectl get po -n p2 "$1" -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null)" == 'True' ]]
}
function __pods_ready() {
  local pod
  [[ "$#" == 0 ]] && return 0
  # Check every pod name passed as an argument
  for pod in "$@"; do
    __is_pod_ready "$pod" || return 1
  done
  return 0
}
function __wait_until_pods_ready() {
  local pods
  while true; do
    pods="$(kubectl get pods -n p2 -o 'jsonpath={.items[*].metadata.name}')"
    if __pods_ready $pods; then
      return 0
    fi
    echo " * Waiting for pods to be ready..."
    sleep 5
  done
}
# Make sure docker is installed
curl -fsSL https://get.docker.com -o install.docker.sh
bash install.docker.sh > /dev/null 2>&1
# Make sure K3s is installed
curl -sfL https://get.k3s.io -o install.k3s.sh
bash install.k3s.sh > /dev/null 2>&1
STORAGE_BASE="${STORAGE_BASE:-/srv/p2}"
# Make sure storage directories exist
mkdir -p "${STORAGE_BASE}"
curl -fsSL -o p2_k3s_storage.yaml "https://git.beryju.org/BeryJu.org/p2/raw/version/${P2_VERSION}/install/k3s-storage.yaml"
sed -i "s|%STORAGE_BASE%|${STORAGE_BASE}|g" p2_k3s_storage.yaml
mv p2_k3s_storage.yaml /var/lib/rancher/k3s/server/manifests/p2-20-storage.yaml
# Give k3s time to pick up and apply the auto-deploy manifest
sleep 30
# TODO: Download crd and operator
# echo " * Your p2 instance will be available at $INGRESS_HOST in a few minutes."
# echo " * You can use the username admin with password admin to login."
rm -r "${TEMP_DIR}"
# From https://github.com/rancher/local-path-provisioner
apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
  namespace: local-path-storage
rules:
  - apiGroups: [""]
    resources: ["nodes", "persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["endpoints", "persistentvolumes", "pods"]
    verbs: ["*"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
  namespace: local-path-storage
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
  - kind: ServiceAccount
    name: local-path-provisioner-service-account
    namespace: local-path-storage
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
        - name: local-path-provisioner
          image: rancher/local-path-provisioner:v0.0.8
          imagePullPolicy: Always
          command:
            - local-path-provisioner
            - --debug
            - start
            - --config
            - /etc/config/config.json
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config/
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      volumes:
        - name: config-volume
          configMap:
            name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Retain
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
      "nodePathMap":[
        {
          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
          "paths":["%STORAGE_BASE%"]
        }
      ]
    }
#!/bin/bash
# p2 uninstall script: removes k3s, all docker containers and images, p2 data, and docker itself
/usr/local/bin/k3s-uninstall.sh
# Stop and remove all containers, then delete all images
docker kill $(docker ps -q)
docker rm $(docker ps -a -q)
docker rmi $(docker images -q)
# Remove p2 data and docker's state directory
rm -rf /srv/p2/
rm -rf /var/lib/docker
# Uninstall the docker packages (Debian/Ubuntu)
apt remove --purge docker* -y && apt autoremove --purge -y
FROM python:3.7-alpine as builder
WORKDIR /mkdocs
COPY docs/ docs
COPY mkdocs.yml .
RUN apk add git && \
pip install mkdocs && \
pip install git+https://github.com/BeryJu/mkdocs-bootstrap4.git && \
mkdocs build
FROM nginx
COPY --from=builder /mkdocs/site /usr/share/nginx/html
# Expiry
The Expiry component allows you to create Blobs which are automatically deleted at a certain date/time.
To use expiry on a Blob, enable the Component on your Volume and tag the Blob with the following tag:
`component.p2.io/expiry/date`
The value is a Unix timestamp; the Blob will be deleted as soon as that time has been reached.
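For example, to compute a timestamp one week from now, you can use GNU `date` (as found on most Linux systems; BSD/macOS `date` uses different flags):
```
date -d "+7 days" +%s
```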
# Image-attribute Scanning
The Image-attribute Scanning Component scans compatible images (JPEG and TIFF) and extracts their EXIF data. The following attributes are currently extracted:
* Image Height
* Image Width
* Compression
* Orientation
* Camera Model
* Software
# Public Access
Public Access automatically enables anonymous read access to all new Blobs in the current Volume.
# Quota
Quota allows you to limit the size of a Volume. You can configure a size threshold and an action which p2 executes once that threshold is reached. Currently supported actions are listed below; a rough configuration sketch follows the list.
- Do Nothing
    - Shows a warning in the UI
    - No alerts
- Prevent Further Uploads
    - Prevents new uploads to the Volume
    - Existing Blobs can still be updated
    - The warning in the UI is still shown
- Send E-Mail to uploader and admin
    - A warning e-mail is sent to the user who uploaded the Blob, as well as to all admins
    - The warning in the UI is still shown
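Purely as an illustration of how threshold and action fit together (all field names below are hypothetical, not the actual p2 configuration schema), a quota setup conceptually looks like this:
```
# Hypothetical sketch only, not the actual p2 schema
quota:
  threshold: 10GB           # size limit for the Volume
  action: prevent_uploads   # or: do_nothing, send_email
```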
# Replication
The Replication Component replicates Blobs from one Volume to another. Replication is push-based, rather than pull-based.
**This feature is under development and might not behave as you imagine it.**
You can optionally specify an offset by which operations are delayed, which makes it possible to use this as a backup.
You can also ignore Blobs matching a certain pattern, for example to exclude everything matching `\.iso$` from replication.
# Welcome
Welcome to the p2 documentation. p2 is an open-source object-storage server, focused on simple and quick sharing. It allows you to quickly share files with people, and it also offers an S3-compatible API, which lets you easily integrate other software with p2.
p2 uses the following terminology:
### Storage
A Storage represents a way p2 stores data. For example, this might be a LocalStorageController instance,
which saves data on a locally mounted drive. There is also an S3StorageController class,
which lets you use S3 or an S3-compatible backend to store data.
### Volume
A logical grouping of data, comparable to an S3 bucket.
### Component
Individual features which can be enabled on a per-Volume basis.
### tier0
tier0 is the component which accelerates serving of your Blobs. It also lets you match custom URLs based on regular expressions, and it caches Blobs.
# Installation
This guide expects you to have a fully-configured Kubernetes cluster. If you want to run p2 on a single server, read [this](single-node-install.md) first.
## Operator
p2 uses an operator to manage itself. Execute `kubectl apply -f https://git.beryju.org/BeryJu.org/p2/raw/master/deploy/operator.yaml` to install the Operator.
To verify that the operator has successfully been installed and is running, check the output of `kubectl get pod`.
```
NAME READY STATUS RESTARTS AGE
p2-operator-5bc6bcf5c7-qtsp2 1/1 Running 0 98s
```
## Instance
Now, to create the actual p2 instance, download the [example](https://git.beryju.org/BeryJu.org/p2/raw/master/deploy/example-instance.yaml) instance definition and adapt it to your needs.
After you've changed the YAML to your liking, create the instance with the following command:
```
kubectl apply -f example-instance.yaml
```
The actual bootstrapping of the instance can take a few minutes. Run this command to watch the progress: `watch kubectl get pods`
Once the output looks something like this, your p2 install is ready to use.
```
NAME READY STATUS RESTARTS AGE
example-p2-18vtwme7xxcdin9copwgnnz13-grpc-b76c8b87c-jhv98 1/1 Running 0 10m
example-p2-18vtwme7xxcdin9copwgnnz13-postgresql-0 1/1 Running 0 10m
example-p2-18vtwme7xxcdin9copwgnnz13-redis-master-0 1/1 Running 0 10m
example-p2-18vtwme7xxcdin9copwgnnz13-redis-slave-776bd5569h7ttx 1/1 Running 0 10m
example-p2-18vtwme7xxcdin9copwgnnz13-static-659f977dc4-8sx5m 2/2 Running 0 10m
example-p2-18vtwme7xxcdin9copwgnnz13-tier0-77f7694798-4d776 1/1 Running 0 10m
example-p2-18vtwme7xxcdin9copwgnnz13-tier0-77f7694798-f5tc9 1/1 Running 0 10m
example-p2-18vtwme7xxcdin9copwgnnz13-web-77f44bd466-ngslj 1/1 Running 0 10m
example-p2-18vtwme7xxcdin9copwgnnz13-worker-6c69d985b-l8vdd 1/1 Running 0 10m
p2-operator-5bc6bcf5c7-qtsp2 1/1 Running 0 34m
```
Access your p2 install under the domain(s) configured. The default login credentials are `admin/admin`.
# Single-Node Installation
Since p2 is built with Kubernetes integration, the only supported method to run p2 is within such a cluster. To run p2 on a single node, it is recommended to use [k3s](https://k3s.io). This page is a short guide on how to prepare your single node for p2.
## Requirements
### Hardware
| | With tier0 | Without tier0 |
|--|------------|---------------|
| CPU | 2 Cores | 2 Cores |
| RAM | 2 GB | 4 GB |
| Disk | At least 20 GB recommended, plus your data | At least 20 GB recommended, plus your data |
## Installing k3s
Installing k3s is very easy; simply run the following script on your node to install the cluster.
```
curl -sfL https://get.k3s.io | sh -
```
After the script is done, you should be able to run `kubectl get node` and see one ready node:
```
NAME STATUS ROLES AGE VERSION
p2-test-vm Ready master 13s v1.14.5-k3s.1
```
Congratulations, you now have a single-node Kubernetes cluster. By default, k3s installs traefik as the `Ingress Controller` (i.e. a reverse proxy). There is, however, no default Persistent Volume provisioner, which means we need the following component:
## Installing local-path-provisioner
This tool allows Kubernetes to dynamically allocate "Volumes" pointing to a local path. Download the install manifest as follows:
```
wget https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
```
The default base path is `/opt/local-path-provisioner`. If you wish to change that path, edit the downloaded YAML file to your needs.
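The path to change lives in the `local-path-config` ConfigMap inside that manifest; its structure matches the copy bundled with this repo, shown earlier:
```
"nodePathMap":[
    {
        "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
        "paths":["/opt/local-path-provisioner"]
    }
]
```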
Once you're done, apply the manifest with this command: `kubectl apply -f local-path-storage.yaml`.
To check that the provisioner has successfully been installed and is running, execute `kubectl -n local-path-storage get pod`. The output should look something like this:
```
NAME READY STATUS RESTARTS AGE
local-path-provisioner-848fdcff-h4l68 1/1 Running 0 10s
```
By default, the `local-path` StorageClass is not marked as the cluster's default. To change that, run
```
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
```
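To verify the change, list the storage classes; the default one is marked with `(default)` next to its name:
```
kubectl get storageclass
```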
Now that you have a fully-prepared Kubernetes cluster, you can continue with the normal installation instructions [here](install.md).
# Migrating from pyazo
To migrate from pyazo to p2, we're going to use the S3 API to mass-import Blobs and create one or more matching tier0 Policies.
## Prerequisites
* A pyazo install (any version)
* A fully-configured p2 install (0.1.16+)
* Enough free space to store all of pyazo's Blobs
    * Check with `du -sh /usr/share/pyazo/media` on the server pyazo is running on
* An administrative account on the pyazo server
* An API key in p2
Recommended, but not required:
* A dedicated volume to import these Blobs into
## Preparation
To migrate the data, we need the AWS CLI ([https://aws.amazon.com/cli/](https://aws.amazon.com/cli/)), so install it on the pyazo host as follows:
`sudo pip install awscli`
If that doesn't work for some reason, try the bundled installer: [https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html](https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html)
Next, execute the following command to configure your credentials.
`aws configure`
In the prompt asking for an AWS Access Key, enter your p2 access key; the same goes for the Secret Access Key. When asked for a region, you can enter anything, since p2 currently ignores this field.
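The prompts look something like this (the region and output format values here are arbitrary):
```
AWS Access Key ID [None]: <your p2 access key>
AWS Secret Access Key [None]: <your p2 secret key>
Default region name [None]: us-east-1
Default output format [None]: json
```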
Now that your AWS CLI is set up correctly, let's make sure it can talk to p2. Execute the following command, substituting `<p2-URL>` with the URL of your install.
`aws --endpoint-url https://<p2-URL> s3 ls`
The result should look something like this:
```
2006-02-03 16:45:09 pyazo-import-test
2006-02-03 16:45:09 some-other-volume
```
## Migrating the data
To actually migrate the data, we use the AWS CLI's `cp` command to recursively copy all files.
Run the following in `/usr/share/pyazo/media` to start the copy process:
`aws --endpoint-url https://<p2-URL> s3 cp . s3://<volume-name> --recursive --exclude thumbnails/`
This will import all of your data into p2. You can run this command multiple times without creating duplicate objects. 
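If you'd like to preview the transfer before running it for real, the AWS CLI's `cp` command accepts a `--dryrun` flag, which only displays the operations that would be performed:
```
aws --endpoint-url https://<p2-URL> s3 cp . s3://<volume-name> --recursive --exclude thumbnails/ --dryrun
```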
## Migrating the URLs
p2 uses a new system that allows you to use URLs of pretty much any format. By default, files are accessible by their absolute path, e.g.
`https://<p2 URL>/<volume>/<blob path>`
This will, of course, only return the file if the current user has permission to read the Blob.
To preserve the old URLs, which match based on File Hash, you need to create one or more tier0 Policies. A tier0 Policy consists of two parts:
* Tags, which are used to match against the current Request, and determine when the tier0 Policy is triggered.
* A Blob Query, which is used to lookup a Blob from the Database based on the Request.
Depending on which setting you used for `default_return_view`, you can create a tier0 Policy based on the table below.
| Setting in pyazo | Tags | Blob Query |
|---|---|---|
|`view_md5` | `serve.p2.io/match/path/relative: ([A-Fa-f0-9]{32})(\.?[a-zA-Z0-9]*)` | `attributes__blob.p2.io/hash/md5={path_relative}&volume__name=images` |
| `view_sha512_short` | `serve.p2.io/match/path/relative: ([A-Fa-f0-9]{16})(\.?[a-zA-Z0-9]*)` | `attributes__blob.p2.io/hash/sha512__startswith={path_relative}&volume__name=images` |
| `view_sha256` | `serve.p2.io/match/path/relative: ([A-Fa-f0-9]{64})(\.?[a-zA-Z0-9]*)` | `attributes__blob.p2.io/hash/sha256={path_relative}&volume__name=images` |
| `view_sha512` | `serve.p2.io/match/path/relative: ([A-Fa-f0-9]{128})(\.?[a-zA-Z0-9]*)` | `attributes__blob.p2.io/hash/sha512={path_relative}&volume__name=images` |
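As a concrete illustration of how these patterns match (the file name below is invented), take the `view_md5` row:
```
# Request path:  d41d8cd98f00b204e9800998ecf8427e.png
# Tag regex:     ([A-Fa-f0-9]{32})(\.?[a-zA-Z0-9]*)
# Group 1:       d41d8cd98f00b204e9800998ecf8427e   (the 32-character MD5 hash)
# Group 2:       .png                               (the optional file extension)
```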
## Final Steps
To finalise the migration to p2, you should take a look at the following optional components:
- [Public Access](components/public-access.md)
- [Image-attribute Scanning](components/image-attribute-scanning.md)
site_name: p2 Docs
nav:
  - Home: index.md
  - Installation:
      - Single-Node Installation: installation/single-node-install.md
      - Installation: installation/install.md
  - Components:
      - Image-attribute Scanning: components/image-attribute-scanning.md
      - Quota: components/quota.md
      - Public Access: components/public-access.md
      - Replication: components/replication.md
      - Expiry: components/expiry.md
  - Migration:
      - Migrating from pyazo: migrating-from-pyazo.md
repo_url: https://git.beryju.org/BeryJu.org/p2
theme:
  name: bootstrap4
FROM quay.io/operator-framework/helm-operator:v0.8.1
# Since kaniko seems to have issues with quay.io for some reason, the image is replicated to docker.beryju.org
FROM docker.beryju.org/k8s/helm-operator:v0.8.1
COPY watches.yaml ${HOME}/watches.yaml
COPY helm-charts/ ${HOME}/helm-charts/