You are viewing a plain text version of this content. The canonical link for it is here.
Posted to server-dev@james.apache.org by ro...@apache.org on 2020/05/28 16:25:04 UTC

[james-project] branch master updated: JAMES-3157 Add support for bench profiles

This is an automated email from the ASF dual-hosted git repository.

rouazana pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/james-project.git


The following commit(s) were added to refs/heads/master by this push:
     new a959ae3  JAMES-3157 Add support for bench profiles
a959ae3 is described below

commit a959ae39229fdc35a4db8f5319cefeae6b0a1551
Author: Gautier DI FOLCO <gd...@linagora.com>
AuthorDate: Mon Apr 27 12:11:33 2020 +0200

    JAMES-3157 Add support for bench profiles
---
 JenkinsfileStressTests.groovy | 44 ++++++++++++++++----
 benchmarks/s3.properties      | 71 ++++++++++++++++++++++++++++++++
 benchmarks/swift.properties   | 96 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 202 insertions(+), 9 deletions(-)

diff --git a/JenkinsfileStressTests.groovy b/JenkinsfileStressTests.groovy
index 7d5b6cc..213e7fd 100644
--- a/JenkinsfileStressTests.groovy
+++ b/JenkinsfileStressTests.groovy
@@ -43,6 +43,11 @@ pipeline {
                             sh "cp -r server/container/cli/target/james-server-cli.lib dockerfiles/run/guice/cassandra-rabbitmq/destination"
                             sh 'cp server/protocols/jmap-draft-integration-testing/rabbitmq-jmap-draft-integration-testing/src/test/resources/keystore dockerfiles/run/guice/cassandra-rabbitmq/destination/conf'
                             sh 'wget -O dockerfiles/run/guice/cassandra-rabbitmq/destination/glowroot.zip https://github.com/glowroot/glowroot/releases/download/v0.13.4/glowroot-0.13.4-dist.zip && unzip -u dockerfiles/run/guice/cassandra-rabbitmq/destination/glowroot.zip -d dockerfiles/run/guice/cassandra-rabbitmq/destination'
+
+                            if (params.PROFILE in ["s3", "swift"]) {
+                                sh 'cp benchmarks/' + params.PROFILE + '.properties dockerfiles/run/guice/cassandra-rabbitmq/destination/conf/blob.properties'
+                            }
+
                             sh 'docker build -t james_run dockerfiles/run/guice/cassandra-rabbitmq'
                         }
                     }
@@ -55,21 +60,42 @@ pipeline {
                                 echo 'Last build failed, cleaning provisionning'
                                 sh 'sudo btrfs subvolume delete /srv/bench-running-docker'
                             }
-                            sh "cd /srv && sudo btrfs subvolume snapshot bench-snapshot bench-running-docker"
-                            sh 'docker run -d --name=cassandra -p 9042:9042 -v /srv/bench-running-docker/cassandra:/var/lib/cassandra cassandra:3.11.3'
-                            sh 'docker run -d --name=elasticsearch -p 9200:9200 -v /srv/bench-running-docker/elasticsearch:/usr/share/elasticsearch/data/elasticsearch  --env "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:6.3.2'
-                            sh 'docker run -d --name=tika apache/tika:1.24'
-                            sh 'docker run -d --name=swift -p 8080:8080 -v /srv/bench-running-docker/swift:/srv/1/node/sdb1 jeantil/openstack-keystone-swift:pike'
-                            sh 'docker run -d --name=rabbitmq -p 15672:15672 -p 5672:5672 rabbitmq:3.8.1-management'
+                            switch (params.PROFILE) {
+                                case "reference":
+                                    sh "cd /srv && sudo btrfs subvolume snapshot bench-snapshot bench-running-docker"
+                                    sh 'docker run -d --name=cassandra -p 9042:9042 -v /srv/bench-running-docker/cassandra:/var/lib/cassandra cassandra:3.11.3'
+                                    sh 'docker run -d --name=elasticsearch -p 9200:9200 -v /srv/bench-running-docker/elasticsearch:/usr/share/elasticsearch/data/elasticsearch  --env "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:6.3.2'
+                                    sh 'docker run -d --name=tika apache/tika:1.24'
+                                    sh 'docker run -d --name=swift -p 8080:8080 -v /srv/bench-running-docker/swift:/srv/1/node/sdb1 jeantil/openstack-keystone-swift:pike'
+                                    sh 'docker run -d --name=rabbitmq -p 15672:15672 -p 5672:5672 rabbitmq:3.8.1-management'
+
+                                    sh 'docker run -d --hostname HOSTNAME -p 25:25 -p 1080:80 -p 8000:8000 -p 110:110 -p 143:143 -p 465:465 -p 587:587 -p 993:993 --link cassandra:cassandra --link rabbitmq:rabbitmq --link elasticsearch:elasticsearch --link tika:tika --link swift:swift --name james_run -t james_run'
+                                    break
+                                case ["s3", "swift"]:
+                                    sh 'docker run -d --name=cassandra -p 9042:9042 cassandra:3.11.3'
+                                    sh 'docker run -d --name=elasticsearch -p 9200:9200 --env "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:6.3.2'
+                                    sh 'docker run -d --name=tika apache/tika:1.24'
+                                    sh 'docker run -d --name=rabbitmq -p 15672:15672 -p 5672:5672 rabbitmq:3.8.1-management'
 
-                            sh 'docker run -d --hostname HOSTNAME -p 25:25 -p 1080:80 -p 8000:8000 -p 110:110 -p 143:143 -p 465:465 -p 587:587 -p 993:993 --link cassandra:cassandra --link rabbitmq:rabbitmq --link elasticsearch:elasticsearch --link tika:tika --link swift:swift --name james_run -t james_run'
+                                    sh 'printenv | grep OS_ > env.file'
+                                    sh 'docker run -d --env-file env.file --hostname HOSTNAME -p 25:25 -p 1080:80 -p 8000:8000 -p 110:110 -p 143:143 -p 465:465 -p 587:587 -p 993:993 --link cassandra:cassandra --link rabbitmq:rabbitmq --link elasticsearch:elasticsearch --link tika:tika --name james_run -t james_run'
+                                    break
+                            }
+                            def jamesCliWithOptions = 'java -jar /root/james-cli.jar -h 127.0.0.1 -p 9999'
                             timeout(time: 20, unit: 'MINUTES') {
                                 retry(200) {
                                     sleep 5
-                                    def jamesCliWithOptions = 'java -jar /root/james-cli.jar -h 127.0.0.1 -p 9999'
                                     sh "docker exec james_run ${jamesCliWithOptions} listusers"
                                 }
                             }
+                            if (params.PROFILE in ["s3", "swift"]) {
+                                sh "docker exec james_run ${jamesCliWithOptions} removedomain localhost"
+                                sh "docker exec james_run ${jamesCliWithOptions} removedomain james.linagora.com"
+                                sh "docker exec james_run ${jamesCliWithOptions} adddomain open-paas.org"
+                                for (int n = 0; n <= 100; n++) {
+                                    sh "docker exec james_run ${jamesCliWithOptions} adduser user${n}@open-paas.org secret"
+                                }
+                            }
                         }
                     }
                 }
@@ -97,7 +123,7 @@ pipeline {
                 script {
                     sh 'docker logs james_run || true'
                     sh 'docker rm -f cassandra rabbitmq elasticsearch tika swift james_run || true'
-                    sh 'sudo btrfs subvolume delete /srv/bench-running-docker'
+                    sh 'sudo btrfs subvolume delete /srv/bench-running-docker || true'
                 }
             }
         }
diff --git a/benchmarks/s3.properties b/benchmarks/s3.properties
new file mode 100644
index 0000000..9962979
--- /dev/null
+++ b/benchmarks/s3.properties
@@ -0,0 +1,71 @@
+# ============================================= BlobStore Implementation ==================================
+# Read https://james.apache.org/server/config-blobstore.html for further details
+
+# Choose your BlobStore implementation
+# Mandatory, allowed values are: cassandra, objectstorage, hybrid
+# hybrid is using both objectstorage for infrequently read or big blobs & cassandra for small, often read blobs
+implementation=objectstorage
+
+# ============================================== ObjectStorage ============================================
+
+# ========================================= ObjectStorage Codec ======================================
+# Codec for blob contents
+# Mandatory, allowed values are: DEFAULT, AES256
+# DEFAULT: no encryption
+# AES256: AES-256 encryption
+objectstorage.payload.codec=DEFAULT
+
+# ===================================== ObjectStorage AES256 Encryption ====================================
+# Mandatory if you choose AES256 encryption, salt value in string for the encryption
+# objectstorage.aes256.hexsalt=salt
+
+# Mandatory if you choose AES256 encryption, password for the encryption
+# objectstorage.aes256.password=password
+
+# ========================================= ObjectStorage providers ========================================
+# Choosing blob storage service
+# Mandatory, Allowed values are: swift, aws-s3
+objectstorage.provider=aws-s3
+
+# ========================================= ObjectStorage Buckets ==========================================
+# bucket names prefix
+# Optional, default no prefix
+# objectstorage.bucketPrefix=prod-
+
+# Default bucket name
+# Optional, default is bucketPrefix + `default`
+objectstorage.namespace=${env:OS_NAMESPACE}
+
+
+# ========================================= ObjectStorage on S3 =============================================
+objectstorage.s3.region=${env:OS_S3_REGION}
+objectstorage.s3.endPoint=${env:OS_S3_ENDPOINT}
+objectstorage.s3.accessKeyId=${env:OS_S3_ACCESSKEYID}
+objectstorage.s3.secretKey=${env:OS_S3_SECRETKEY}
+
+# ============================================ Blobs Exporting ==============================================
+# Read https://james.apache.org/server/config-blob-export.html for further details
+
+# Choosing blob exporting mechanism, allowed mechanism are: localFile, linshare
+# LinShare is a file sharing service, will be explained in the below section
+# Optional, default is localFile
+blob.export.implementation=localFile
+
+# ======================================= Local File Blobs Exporting ========================================
+# Optional, directory to store exported blob, directory path follows James file system format
+# default is file://var/blobExporting
+blob.export.localFile.directory=file://var/blobExporting
+
+# ======================================= LinShare File Blobs Exporting ========================================
+# LinShare is a file sharing service. James connects to an existing LinShare server and shares files with
+# other mail addresses, as long as those addresses are available in LinShare. For example you can deploy James and LinShare
+# sharing the same LDAP repository
+# Mandatory if you choose LinShare, url to connect to LinShare service
+# blob.export.linshare.url=http://linshare:8080
+
+# ======================================= LinShare Configuration BasicAuthentication ===================================
+# Authentication is mandatory if you choose LinShare; a TechnicalAccount is needed to connect to the LinShare service.
+# For Example: It will be formalized to 'Authorization: Basic {Credential of UUID/password}'
+
+# blob.export.linshare.technical.account.uuid=Technical_Account_UUID
+# blob.export.linshare.technical.account.password=password
diff --git a/benchmarks/swift.properties b/benchmarks/swift.properties
new file mode 100644
index 0000000..ea990ca
--- /dev/null
+++ b/benchmarks/swift.properties
@@ -0,0 +1,96 @@
+# ============================================= BlobStore Implementation ==================================
+# Read https://james.apache.org/server/config-blobstore.html for further details
+
+# Choose your BlobStore implementation
+# Mandatory, allowed values are: cassandra, objectstorage, hybrid
+# hybrid is using both objectstorage for infrequently read or big blobs & cassandra for small, often read blobs
+implementation=objectstorage
+
+# ============================================== ObjectStorage ============================================
+
+# ========================================= ObjectStorage Codec ======================================
+# Codec for blob contents
+# Mandatory, allowed values are: DEFAULT, AES256
+# DEFAULT: no encryption
+# AES256: AES-256 encryption
+objectstorage.payload.codec=DEFAULT
+
+# ===================================== ObjectStorage AES256 Encryption ====================================
+# Mandatory if you choose AES256 encryption, salt value in string for the encryption
+# objectstorage.aes256.hexsalt=salt
+
+# Mandatory if you choose AES256 encryption, password for the encryption
+# objectstorage.aes256.password=password
+
+# ========================================= ObjectStorage providers ========================================
+# Choosing blob storage service
+# Mandatory, Allowed values are: swift, aws-s3
+objectstorage.provider=swift
+
+# ========================================= ObjectStorage Buckets ==========================================
+# bucket names prefix
+# Optional, default no prefix
+# objectstorage.bucketPrefix=prod-
+
+# Default bucket name
+# Optional, default is bucketPrefix + `default`
+objectstorage.namespace=${env:OS_NAMESPACE}
+
+# ========================================= ObjectStorage on Swift =========================================
+# Specify the geography region which the BlobStore will connect to a in Swift ObjectStorage
+# Optional, possible values are those of your Swift service
+# objectstorage.swift.region=HKG
+
+# Specify the authentication mechanism of Swift
+# Mandatory, allowed values are: tmpauth, keystone2, keystone3
+objectstorage.swift.authapi=keystone3
+
+# Mandatory, authentication endpoint
+objectstorage.swift.endpoint=${env:OS_SWIFT_ENDPOINT}
+
+# Openstack Swift authentication mechanisms require you to pass the user information
+# to get the access tokens. User information usually include two parts:
+# identity: user identification like username
+# credential: like password, here it is the Secret Access Key in Jcloud
+# Mandatory
+objectstorage.swift.credentials=${env:OS_SWIFT_CREDENTIALS}
+
+# =================================== ObjectStorage on Swift with Temp Auth ================================
+# Mandatory, authentication identity, the identity contains two part, username and tenantname
+# Should be the username & tenant name configured in Swift
+# Mandatory
+objectstorage.swift.keystone3.user.name=${env:OS_SWIFT_USER_NAME}
+# Mandatory
+objectstorage.swift.keystone3.user.domain=${env:OS_SWIFT_USER_DOMAIN}
+
+# ================================ ObjectStorage on Swift with Keystone 3 Auth ==============================
+objectstorage.swift.keystone3.scope.domainid=${env:OS_SWIFT_DOMAIN_ID}
+objectstorage.swift.keystone3.scope.project.domainid=${env:OS_SWIFT_PROJECT_DOMAIN_ID}
+objectstorage.swift.keystone3.scope.project.name=${env:OS_SWIFT_PROJECT_DOMAIN_NAME}
+
+# ============================================ Blobs Exporting ==============================================
+# Read https://james.apache.org/server/config-blob-export.html for further details
+
+# Choosing blob exporting mechanism, allowed mechanism are: localFile, linshare
+# LinShare is a file sharing service, will be explained in the below section
+# Optional, default is localFile
+blob.export.implementation=localFile
+
+# ======================================= Local File Blobs Exporting ========================================
+# Optional, directory to store exported blob, directory path follows James file system format
+# default is file://var/blobExporting
+blob.export.localFile.directory=file://var/blobExporting
+
+# ======================================= LinShare File Blobs Exporting ========================================
+# LinShare is a file sharing service. James connects to an existing LinShare server and shares files with
+# other mail addresses, as long as those addresses are available in LinShare. For example you can deploy James and LinShare
+# sharing the same LDAP repository
+# Mandatory if you choose LinShare, url to connect to LinShare service
+# blob.export.linshare.url=http://linshare:8080
+
+# ======================================= LinShare Configuration BasicAuthentication ===================================
+# Authentication is mandatory if you choose LinShare; a TechnicalAccount is needed to connect to the LinShare service.
+# For Example: It will be formalized to 'Authorization: Basic {Credential of UUID/password}'
+
+# blob.export.linshare.technical.account.uuid=Technical_Account_UUID
+# blob.export.linshare.technical.account.password=password


---------------------------------------------------------------------
To unsubscribe, e-mail: server-dev-unsubscribe@james.apache.org
For additional commands, e-mail: server-dev-help@james.apache.org