From ee212b1a3302e2dd5645ffc54b32bf66251a959f Mon Sep 17 00:00:00 2001
From: "Felix J. Ogris"
Date: Thu, 20 Oct 2022 23:23:01 +0200
Subject: [PATCH] implemented S3 storage backend

added sample configuration + aws php sdk version

coding style cleanup
---
 CHANGELOG.md           |   1 +
 CREDITS.md             |   3 +-
 INSTALL.md             |  51 +++++
 cfg/conf.sample.php    |  22 ++
 composer.json          |   3 +-
 lib/Configuration.php  |  16 ++
 lib/Data/S3Storage.php | 464 ++++++++++++++++++++++++++++++++++++++++++
 7 files changed, 558 insertions(+), 2 deletions(-)
 create mode 100644 lib/Data/S3Storage.php

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 15bb1b1f..bda29706 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@
    * CHANGED: Avoid `SUPER` privilege for setting the `sql_mode` for MariaDB/MySQL (#919)
    * FIXED: Revert to CREATE INDEX without IF NOT EXISTS clauses, to support MySQL (#943)
    * FIXED: Apply table prefix to indexes as well, to support multiple instances sharing a single database (#943)
+   * ADDED: S3 Storage backend (#994)
  * **1.4 (2022-04-09)**
    * ADDED: Translations for Corsican, Estonian, Finnish and Lojban
    * ADDED: new HTTP headers improving security (#765)
diff --git a/CREDITS.md b/CREDITS.md
index c0be0ad2..d0035077 100644
--- a/CREDITS.md
+++ b/CREDITS.md
@@ -29,6 +29,7 @@
 * rodehoed - option to exempt ips from the rate-limiter
 * Mark van Holsteijn - Google Cloud Storage backend
 * Austin Huang - Oracle database support
+* Felix J. Ogris - S3 Storage backend
 
 ## Translations
 * Hexalyse - French
@@ -58,4 +59,4 @@
 * Markus Mikkonen - Finnish
 * Emir Ensar Rahmanlar - Turkish
 * Stevo984 - Slovak
-* Christos Karamolegkos - Greek
\ No newline at end of file
+* Christos Karamolegkos - Greek
diff --git a/INSTALL.md b/INSTALL.md
index 615ce56e..f35d68bf 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -232,3 +232,54 @@
 Platform using Google Cloud Run is easy and cheap. To use the Google Cloud
 Storage backend you have to install the suggested library using the command
 `composer require google/cloud-storage`.
+
+#### Using S3 Storage
+Similar to Google Cloud Storage, you can choose S3 as a storage backend. It
+uses the AWS SDK for PHP, but can also talk to a Rados gateway as part of a
+CEPH cluster. To use this backend, you first have to install the SDK in the
+document root of PrivateBin: `composer require aws/aws-sdk-php`. You have to
+create the S3 bucket on the CEPH cluster before using the S3 backend.
+
+In the `[model]` section of cfg/conf.php, set `class` to `S3Storage`.
+
+You can set any combination of the following options in the `[model_options]`
+section:
+
+ * region
+ * version
+ * endpoint
+ * bucket
+ * prefix
+ * accesskey
+ * secretkey
+ * use_path_style_endpoint
+
+By default, `prefix` is empty. If set, the S3 backend will place all
+PrivateBin data beneath this prefix.
+
+For AWS, you have to provide at least `region`, `bucket`, `accesskey`, and
+`secretkey`.
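+
+For AWS, a minimal configuration might look like the following sketch (the
+bucket name and credentials are placeholders; `version = "latest"` selects
+the most recent S3 API version, matching the AWS sample in
+cfg/conf.sample.php):
+
+```
+region = "eu-central-1"
+version = "latest"
+bucket = "my-bucket"
+accesskey = "access key id"
+secretkey = "secret access key"
+```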
+
+For CEPH, follow this example:
+
+```
+region = ""
+version = "2006-03-01"
+endpoint = "https://s3.my-ceph.invalid"
+use_path_style_endpoint = true
+bucket = "my-bucket"
+accesskey = "my-rados-user"
+secretkey = "my-rados-pass"
+```
diff --git a/cfg/conf.sample.php b/cfg/conf.sample.php
index ab37da7b..cf465980 100644
--- a/cfg/conf.sample.php
+++ b/cfg/conf.sample.php
@@ -205,3 +205,25 @@ dir = PATH "data"
 ;usr = "privatebin"
 ;pwd = "Z3r0P4ss"
 ;opt[12] = true ; PDO::ATTR_PERSISTENT
+
+;[model]
+; example of S3 configuration for Rados gateway / CEPH
+;class = S3Storage
+;[model_options]
+;region = ""
+;version = "2006-03-01"
+;endpoint = "https://s3.my-ceph.invalid"
+;use_path_style_endpoint = true
+;bucket = "my-bucket"
+;accesskey = "my-rados-user"
+;secretkey = "my-rados-pass"
+
+;[model]
+; example of S3 configuration for AWS
+;class = S3Storage
+;[model_options]
+;region = "eu-central-1"
+;version = "latest"
+;bucket = "my-bucket"
+;accesskey = "access key id"
+;secretkey = "secret access key"
diff --git a/composer.json b/composer.json
index 0fd34559..01d384f2 100644
--- a/composer.json
+++ b/composer.json
@@ -30,7 +30,8 @@
         "mlocati/ip-lib" : "1.18.0"
     },
     "suggest" : {
-        "google/cloud-storage" : "1.26.1"
+        "google/cloud-storage" : "1.26.1",
+        "aws/aws-sdk-php" : "3.239.0"
     },
     "require-dev" : {
         "phpunit/phpunit" : "^4.6 || ^5.0"
diff --git a/lib/Configuration.php b/lib/Configuration.php
index 9f4e35f7..fad44a27 100644
--- a/lib/Configuration.php
+++ b/lib/Configuration.php
@@ -157,6 +157,22 @@ class Configuration
                     'prefix'     => 'pastes',
                     'uniformacl' => false,
                 );
+            } elseif (
+                $section == 'model_options' && in_array(
+                    $this->_configuration['model']['class'],
+                    array('S3Storage')
+                )
+            ) {
+                $values = array(
+                    'region'                  => null,
+                    'version'                 => null,
+                    'endpoint'                => null,
+                    'accesskey'               => null,
+                    'secretkey'               => null,
+                    'use_path_style_endpoint' => null,
+                    'bucket'                  => null,
+                    'prefix'                  => '',
+                );
             }
 
             // "*_options" sections don't require all defaults to be set
diff --git a/lib/Data/S3Storage.php b/lib/Data/S3Storage.php
new file mode 100644
index 00000000..d741e099
--- /dev/null
+++ b/lib/Data/S3Storage.php
@@ -0,0 +1,464 @@
+            'Bucket' => self::$_bucket,
+            'Prefix' => $prefix,
+        );
+
+        do {
+            $objectsListResponse = self::$_client->listObjects($options);
+            $objects             = $objectsListResponse['Contents'] ?? array();
+            foreach ($objects as $object) {
+                $allObjects[]      = $object;
+                $options['Marker'] = $object['Key'];
+            }
+        } while ($objectsListResponse['IsTruncated']);
+
+        return $allObjects;
+    }
+
+    /**
+     * returns the S3 storage object key for $pasteid in self::$_bucket.
+     *
+     * @access private
+     * @param  string $pasteid to get the key for
+     * @return string
+     */
+    private function _getKey($pasteid)
+    {
+        if (self::$_prefix != '') {
+            return self::$_prefix . '/' . $pasteid;
+        }
+        return $pasteid;
+    }
+
+    /**
+     * Uploads the payload into self::$_bucket under the specified key.
+     * The entire payload is stored as a JSON document. The metadata is
+     * replicated as the S3 object's metadata, except for the fields
+     * attachment, attachmentname and salt.
+     *
+     * @param  string $key     to store the payload under
+     * @param  array  $payload to store
+     * @return bool true if successful, otherwise false.
+     */
+    private function _upload($key, $payload)
+    {
+        $metadata = array_key_exists('meta', $payload) ? $payload['meta'] : array();
+        unset($metadata['attachment'], $metadata['attachmentname'], $metadata['salt']);
+        foreach ($metadata as $k => $v) {
+            $metadata[$k] = strval($v);
+        }
+        try {
+            self::$_client->putObject(array(
+                'Bucket'      => self::$_bucket,
+                'Key'         => $key,
+                'Body'        => Json::encode($payload),
+                'ContentType' => 'application/json',
+                'Metadata'    => $metadata,
+            ));
+        } catch (S3Exception $e) {
+            error_log('failed to upload ' . $key . ' to ' . self::$_bucket . ', ' .
+                trim(preg_replace('/\s\s+/', ' ', $e->getMessage())));
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * @inheritDoc
+     */
+    public function create($pasteid, array $paste)
+    {
+        if ($this->exists($pasteid)) {
+            return false;
+        }
+
+        return $this->_upload($this->_getKey($pasteid), $paste);
+    }
+
+    /**
+     * @inheritDoc
+     */
+    public function read($pasteid)
+    {
+        try {
+            $object = self::$_client->getObject(array(
+                'Bucket' => self::$_bucket,
+                'Key'    => $this->_getKey($pasteid),
+            ));
+            $data = $object['Body']->getContents();
+            return Json::decode($data);
+        } catch (S3Exception $e) {
+            error_log('failed to read ' . $pasteid . ' from ' . self::$_bucket . ', ' .
+                trim(preg_replace('/\s\s+/', ' ', $e->getMessage())));
+            return false;
+        }
+    }
+
+    /**
+     * @inheritDoc
+     */
+    public function delete($pasteid)
+    {
+        $name = $this->_getKey($pasteid);
+
+        try {
+            $comments = $this->_listAllObjects($name . '/discussion/');
+            foreach ($comments as $comment) {
+                try {
+                    self::$_client->deleteObject(array(
+                        'Bucket' => self::$_bucket,
+                        'Key'    => $comment['Key'],
+                    ));
+                } catch (S3Exception $e) {
+                    // ignore if already deleted
+                }
+            }
+        } catch (S3Exception $e) {
+            // there are no discussions associated with the paste
+        }
+
+        try {
+            self::$_client->deleteObject(array(
+                'Bucket' => self::$_bucket,
+                'Key'    => $name,
+            ));
+        } catch (S3Exception $e) {
+            // ignore if already deleted
+        }
+    }
+
+    /**
+     * @inheritDoc
+     */
+    public function exists($pasteid)
+    {
+        return self::$_client->doesObjectExistV2(self::$_bucket, $this->_getKey($pasteid));
+    }
+
+    /**
+     * @inheritDoc
+     */
+    public function createComment($pasteid, $parentid, $commentid, array $comment)
+    {
+        if ($this->existsComment($pasteid, $parentid, $commentid)) {
+            return false;
+        }
+        $key = $this->_getKey($pasteid) . '/discussion/' . $parentid . '/' . $commentid;
+        return $this->_upload($key, $comment);
+    }
+
+    /**
+     * @inheritDoc
+     */
+    public function readComments($pasteid)
+    {
+        $comments = array();
+        $prefix   = $this->_getKey($pasteid) . '/discussion/';
+        try {
+            $entries = $this->_listAllObjects($prefix);
+            foreach ($entries as $entry) {
+                $object = self::$_client->getObject(array(
+                    'Bucket' => self::$_bucket,
+                    'Key'    => $entry['Key'],
+                ));
+                $body             = Json::decode($object['Body']->getContents());
+                $items            = explode('/', $entry['Key']);
+                $body['id']       = $items[count($items) - 1]; // comment id is the last key segment
+                $body['parentid'] = $items[count($items) - 2]; // counted from the end, so a configured prefix does not shift the indexes
+                $slot             = $this->getOpenSlot($comments, (int) $object['Metadata']['created']);
+                $comments[$slot]  = $body;
+            }
+        } catch (S3Exception $e) {
+            // no comments found
+        }
+        return $comments;
+    }
+
+    /**
+     * @inheritDoc
+     */
+    public function existsComment($pasteid, $parentid, $commentid)
+    {
+        $name = $this->_getKey($pasteid) . '/discussion/' . $parentid . '/' . $commentid;
+        return self::$_client->doesObjectExistV2(self::$_bucket, $name);
+    }
+
+    /**
+     * @inheritDoc
+     */
+    public function purgeValues($namespace, $time)
+    {
+        $path = self::$_prefix;
+        if ($path != '') {
+            $path .= '/';
+        }
+        $path .= 'config/' . $namespace;
+
+        try {
+            foreach ($this->_listAllObjects($path) as $object) {
+                $name = $object['Key'];
+                if (strlen($name) > strlen($path) && substr($name, strlen($path), 1) !== '/') {
+                    continue;
+                }
+                $head = self::$_client->headObject(array(
+                    'Bucket' => self::$_bucket,
+                    'Key'    => $name,
+                ));
+                if (array_key_exists('Metadata', $head) && array_key_exists('value', $head['Metadata'])) {
+                    $value = $head['Metadata']['value'];
+                    if (is_numeric($value) && intval($value) < $time) {
+                        try {
+                            self::$_client->deleteObject(array(
+                                'Bucket' => self::$_bucket,
+                                'Key'    => $name,
+                            ));
+                        } catch (S3Exception $e) {
+                            // deleted by another instance
+                        }
+                    }
+                }
+            }
+        } catch (S3Exception $e) {
+            // no objects in the bucket yet
+        }
+    }
+
+    /**
+     * For S3, the value will also be stored in the metadata for the
+     * namespaces traffic_limiter and purge_limiter.
+     * @inheritDoc
+     */
+    public function setValue($value, $namespace, $key = '')
+    {
+        $prefix = self::$_prefix;
+        if ($prefix != '') {
+            $prefix .= '/';
+        }
+
+        if ($key === '') {
+            $key = $prefix . 'config/' . $namespace;
+        } else {
+            $key = $prefix . 'config/' . $namespace . '/' . $key;
+        }
+
+        $metadata = array('namespace' => $namespace);
+        if ($namespace != 'salt') {
+            $metadata['value'] = strval($value);
+        }
+        try {
+            self::$_client->putObject(array(
+                'Bucket'      => self::$_bucket,
+                'Key'         => $key,
+                'Body'        => $value,
+                'ContentType' => 'application/json',
+                'Metadata'    => $metadata,
+            ));
+        } catch (S3Exception $e) {
+            error_log('failed to set key ' . $key . ' to ' . self::$_bucket . ', ' .
+                trim(preg_replace('/\s\s+/', ' ', $e->getMessage())));
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * @inheritDoc
+     */
+    public function getValue($namespace, $key = '')
+    {
+        $prefix = self::$_prefix;
+        if ($prefix != '') {
+            $prefix .= '/';
+        }
+
+        if ($key === '') {
+            $key = $prefix . 'config/' . $namespace;
+        } else {
+            $key = $prefix . 'config/' . $namespace . '/' . $key;
+        }
+
+        try {
+            $object = self::$_client->getObject(array(
+                'Bucket' => self::$_bucket,
+                'Key'    => $key,
+            ));
+            return $object['Body']->getContents();
+        } catch (S3Exception $e) {
+            return '';
+        }
+    }
+
+    /**
+     * @inheritDoc
+     */
+    protected function _getExpiredPastes($batchsize)
+    {
+        $expired = array();
+        $now     = time();
+        $prefix  = self::$_prefix;
+        if ($prefix != '') {
+            $prefix .= '/';
+        }
+
+        try {
+            foreach ($this->_listAllObjects($prefix) as $object) {
+                $head = self::$_client->headObject(array(
+                    'Bucket' => self::$_bucket,
+                    'Key'    => $object['Key'],
+                ));
+                if (array_key_exists('Metadata', $head) && array_key_exists('expire_date', $head['Metadata'])) {
+                    $expire_at = intval($head['Metadata']['expire_date']);
+                    if ($expire_at != 0 && $expire_at < $now) {
+                        array_push($expired, $object['Key']);
+                    }
+                }
+
+                if (count($expired) > $batchsize) {
+                    break;
+                }
+            }
+        } catch (S3Exception $e) {
+            // no objects in the bucket yet
+        }
+        return $expired;
+    }
+}
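
As a quick reference, the object keys produced by the methods above follow
this layout; `<pasteid>`, `<parentid>`, `<commentid>`, `<namespace>` and
`<key>` are placeholders, and every key is additionally prepended with
`<prefix>/` when the prefix option is set:

```
<pasteid>                                     paste, stored as a JSON document
<pasteid>/discussion/<parentid>/<commentid>   comment, stored as a JSON document
config/<namespace>[/<key>]                    purge_limiter, salt and traffic_limiter values
```

Values such as created, expire_date and value are mirrored into the S3 object
metadata, so purging and rate limiting only need headObject() calls instead of
downloading each object.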