Posted to notifications@james.apache.org by bt...@apache.org on 2021/06/26 01:08:40 UTC

[james-project] 02/23: JAMES-3603 Remove benchmarks

This is an automated email from the ASF dual-hosted git repository.

btellier pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/james-project.git

commit 7002e5114e434e1f5e83980e3a620d735de7d4e5
Author: Benoit Tellier <bt...@linagora.com>
AuthorDate: Wed Jun 23 10:56:30 2021 +0700

    JAMES-3603 Remove benchmarks
    
     - Requires a complex setup (btrfs, Jenkins 2, etc.), and setting it
    up is undocumented
     - Provisioning the email corpus is undocumented
     - The Gatling-Job had not been contributed to the Apache project
     - This launches all components of the distributed server on a
    single machine and as such does not represent real-world performance,
    where the building blocks run on separate machines
     - Not integrated into Apache processes of any kind.

    Since this seems unusable as-is for any member of the community, and
    since Linagora's practices have moved toward testing on a real
    Kubernetes cluster, I propose removing it rather than attempting to
    fix it.
---
 JenkinsfileStressTests.groovy | 166 ------------------------------------------
 benchmarks/s3.properties      |  81 ---------------------
 2 files changed, 247 deletions(-)

diff --git a/JenkinsfileStressTests.groovy b/JenkinsfileStressTests.groovy
deleted file mode 100644
index d5183f7..0000000
--- a/JenkinsfileStressTests.groovy
+++ /dev/null
@@ -1,166 +0,0 @@
-// This file should respect the Jenkinsfile format described here:
-// https://jenkins.io/doc/book/pipeline/jenkinsfile/
-// 
-// It may be used by any Jenkins instance you own.
-//
-// In order to work properly, it requires the following parameters:
-// - SIMULATION: the Gatling scenario you want to play (e.g. com.linagora.gatling.imap.scenario.ImapSimpleScenario)
-
-// The stages below rely on Jenkins' timeout/retry steps to wait for
-// services to become available before running the scenario.
-
-pipeline {
-    options {
-        disableConcurrentBuilds()
-    }
-
-    agent none
-
-    stages {
-        stage('Prepare target') {
-            agent {
-                node {
-                    label 'target'
-                }
-            }
-
-            tools {
-                maven 'maven'
-            }
-
-            stages {
-                stage('Compile') {
-                    steps {
-                        sh "mvn clean install -T1C -DskipTests -Dmaven.javadoc.skip=true"
-                    }
-                }
-                stage('Build image') {
-                    steps {
-                        script {
-                            sh "cp server/container/guice/cassandra-rabbitmq-guice/target/james-server-cassandra-rabbitmq-guice.jar dockerfiles/run/guice/cassandra-rabbitmq/destination"
-                            sh "cp -r server/container/guice/cassandra-rabbitmq-guice/target/james-server-cassandra-rabbitmq-guice.lib dockerfiles/run/guice/cassandra-rabbitmq/destination"
-                            sh "cp server/container/cli/target/james-server-cli.jar dockerfiles/run/guice/cassandra-rabbitmq/destination"
-                            sh "cp -r server/container/cli/target/james-server-cli.lib dockerfiles/run/guice/cassandra-rabbitmq/destination"
-                            sh 'cp server/protocols/jmap-draft-integration-testing/rabbitmq-jmap-draft-integration-testing/src/test/resources/keystore dockerfiles/run/guice/cassandra-rabbitmq/destination/conf'
-                            sh 'wget -O dockerfiles/run/guice/cassandra-rabbitmq/destination/glowroot.zip https://github.com/glowroot/glowroot/releases/download/v0.13.4/glowroot-0.13.4-dist.zip && unzip -u dockerfiles/run/guice/cassandra-rabbitmq/destination/glowroot.zip -d dockerfiles/run/guice/cassandra-rabbitmq/destination'
-
-                            if (params.PROFILE == "s3") {
-                                sh 'cp benchmarks/' + params.PROFILE + '.properties dockerfiles/run/guice/cassandra-rabbitmq/destination/conf/blob.properties'
-                            }
-
-                            sh 'docker build -t james_run dockerfiles/run/guice/cassandra-rabbitmq'
-                        }
-                    }
-                }
-                stage('Start James') {
-                    steps {
-                        script {
-                            sh 'docker rm -f cassandra rabbitmq elasticsearch tika s3 james_run || true'
-                            if (fileExists('/srv/bench-running-docker')) {
-                                echo 'Last build failed, cleaning provisioning'
-                                sh 'sudo btrfs subvolume delete /srv/bench-running-docker'
-                            }
-                            switch (params.PROFILE) {
-                                case "reference":
-                                    sh "cd /srv && sudo btrfs subvolume snapshot bench-snapshot-s3 bench-running-docker"
-                                    sh 'docker run -d --name=cassandra -p 9042:9042 -v /srv/bench-running-docker/cassandra:/var/lib/cassandra cassandra:3.11.10'
-
-                                    sh 'sleep 10'
-
-                                    sh 'docker run -d --name=elasticsearch -p 9200:9200 -v /srv/bench-running-docker/elasticsearch:/usr/share/elasticsearch/data  --env "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.10.2'
-
-                                    sh 'sleep 10'
-
-                                    sh 'docker run -d --name=tika apache/tika:1.24'
-
-                                    sh 'sleep 10'
-
-                                    sh 'docker run -d --name=s3 --env "REMOTE_MANAGEMENT_DISABLE=1" --env "SCALITY_ACCESS_KEY_ID=accessKey1" --env "SCALITY_SECRET_ACCESS_KEY=secretKey1" -v /srv/bench-running-docker/s3/localData:/usr/src/app/localData -v /srv/bench-running-docker/s3/localMetadata:/usr/src/app/localMetadata zenko/cloudserver:8.2.6'
-
-                                    sh 'sleep 10'
-
-                                    sh 'docker run -d --name=rabbitmq -p 15672:15672 -p 5672:5672 rabbitmq:3.8.1-management'
-
-                                    sh 'sleep 10'
-
-                                    sh 'docker run -d --hostname HOSTNAME -p 25:25 -p 1080:80 -p 8000:8000 -p 110:110 -p 143:143 -p 465:465 -p 587:587 -p 993:993 --link cassandra:cassandra --link rabbitmq:rabbitmq --link elasticsearch:elasticsearch --link tika:tika --link s3:s3.docker.test --name james_run -t james_run'
-                                    break
-                                case "s3-local":
-                                    sh 'docker run -d --name=cassandra -p 9042:9042 cassandra:3.11.10'
-                                    sh 'docker run -d --name=elasticsearch -p 9200:9200 --env "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.10.2'
-                                    sh 'docker run -d --name=tika apache/tika:1.24'
-                                    sh 'docker run -d --env "REMOTE_MANAGEMENT_DISABLE=1" --env "SCALITY_ACCESS_KEY_ID=accessKey1" --env "SCALITY_SECRET_ACCESS_KEY=secretKey1" --name=s3 zenko/cloudserver:8.2.6'
-                                    sh 'docker run -d --name=rabbitmq -p 15672:15672 -p 5672:5672 rabbitmq:3.8.1-management'
-
-                                    sh 'docker run -d --hostname HOSTNAME -p 25:25 -p 1080:80 -p 8000:8000 -p 110:110 -p 143:143 -p 465:465 -p 587:587 -p 993:993 --link cassandra:cassandra --link rabbitmq:rabbitmq --link elasticsearch:elasticsearch --link s3:s3.docker.test --link tika:tika --name james_run -t james_run'
-                                    break
-                                case "s3":
-                                    sh 'docker run -d --name=cassandra -p 9042:9042 cassandra:3.11.10'
-                                    sh 'docker run -d --name=elasticsearch -p 9200:9200 --env "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.10.2'
-                                    sh 'docker run -d --name=tika apache/tika:1.24'
-                                    sh 'docker run -d --name=rabbitmq -p 15672:15672 -p 5672:5672 rabbitmq:3.8.1-management'
-
-                                    sh 'printenv | grep OS_ > env.file'
-                                    sh 'docker run -d --env-file env.file --hostname HOSTNAME -p 25:25 -p 1080:80 -p 8000:8000 -p 110:110 -p 143:143 -p 465:465 -p 587:587 -p 993:993 --link cassandra:cassandra --link rabbitmq:rabbitmq --link elasticsearch:elasticsearch --link tika:tika --name james_run -t james_run'
-                                    break
-                            }
-                            def jamesCliWithOptions = 'java -jar /root/james-cli.jar -h 127.0.0.1 -p 9999'
-                            timeout(time: 20, unit: 'MINUTES') {
-                                retry(200) {
-                                    sleep 5
-                                    sh "docker exec james_run ${jamesCliWithOptions} listusers"
-                                }
-                            }
-                            if (params.PROFILE == "s3") {
-                                sh "docker exec james_run ${jamesCliWithOptions} removedomain localhost || true"
-                                sh "docker exec james_run ${jamesCliWithOptions} removedomain james.linagora.com || true"
-                                sh "docker exec james_run ${jamesCliWithOptions} adddomain open-paas.org"
-                                for (int n = 0; n <= 100; n++) {
-                                    sh "docker exec james_run ${jamesCliWithOptions} adduser user${n}@open-paas.org secret"
-                                }
-                            }
-                            if (params.PROFILE == "reference") {
-                            // Sometimes Zenko fails to start correctly after provisioning.
-                            // There is a timeout when it tries to connect to the metadata service.
-                            // Maybe too much metadata to load? A restart seems to help for now.
-                                sh "docker restart s3"
-                                sh 'sleep 5'
-                            }
-                        }
-                    }
-                }
-            }
-        }
-        stage('Start injector') {
-            agent {
-                node {
-                    label 'injector'
-                }
-            }
-            stages {
-                stage('Gatling warmup') {
-                    steps {
-                        build job: 'Gatling-job', parameters: [[$class: 'StringParameterValue', name: 'SBT_ACTION', value: "gatling:testOnly ${params.SIMULATION}"], [$class: 'StringParameterValue', name: 'GITHUB', value: params.GITHUB_SIMULATIONS], [$class: 'StringParameterValue', name: 'DURATION', value: params.WARMUP_DURATION]], propagate: false
-                    }
-                }
-                stage('Run Gatling test') {
-                    steps {
-                        build job: 'Gatling-job', parameters: [[$class: 'StringParameterValue', name: 'SBT_ACTION', value: "gatling:testOnly ${params.SIMULATION}"], [$class: 'StringParameterValue', name: 'GITHUB', value: params.GITHUB_SIMULATIONS]]
-                    }
-                }
-            }
-        }
-    }
-
-    post {
-        always {
-            node('target') {
-                script {
-                    sh 'docker logs james_run || true'
-                    sh 'docker rm -f cassandra rabbitmq elasticsearch tika s3 james_run || true'
-                    sh 'sudo btrfs subvolume delete /srv/bench-running-docker || true'
-                }
-            }
-        }
-    }
-}
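
For anyone who wants to keep a similar readiness check in their own
pipeline, the most reusable part of the file above is the pattern it
used to wait for James: poll the James CLI inside Jenkins' timeout/retry
steps until the server answers. A minimal standalone sketch of that
pattern follows, assuming (as the deleted file did) a container named
james_run and the CLI jar at /root/james-cli.jar; the stage name is
illustrative:

    stage('Wait for James') {
        steps {
            script {
                def jamesCli = 'java -jar /root/james-cli.jar -h 127.0.0.1 -p 9999'
                // Give up after 20 minutes overall; within that window,
                // attempt up to 200 times, pausing 5 seconds per attempt.
                timeout(time: 20, unit: 'MINUTES') {
                    retry(200) {
                        sleep 5
                        // A failing exec fails this attempt and triggers a
                        // retry; the first successful call ends the loop.
                        sh "docker exec james_run ${jamesCli} listusers"
                    }
                }
            }
        }
    }

Note that the sleep sits inside the retry block, so every attempt waits
before probing, and the timeout acts as the overall cap rather than a
per-attempt limit.
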
diff --git a/benchmarks/s3.properties b/benchmarks/s3.properties
deleted file mode 100644
index 162c566..0000000
--- a/benchmarks/s3.properties
+++ /dev/null
@@ -1,81 +0,0 @@
-# ============================================= BlobStore Implementation ==================================
-# Read https://james.apache.org/server/config-blobstore.html for further details
-
-# Choose your BlobStore implementation
-# Mandatory, allowed values are: cassandra, s3, hybrid
-# hybrid uses object storage for infrequently read or big blobs, and cassandra for small, often-read blobs
-implementation=s3
-
-# ========================================= ObjectStorage deduplication ========================================
-# If you choose to enable deduplication, mails with the same content will be stored only once.
-# Warning: Once this feature is enabled, there is no turning back: disabling it later would lead to the deletion of all
-# the mails sharing the same content as soon as one of them is deleted.
-# Mandatory, allowed values are: true, false
-deduplication.enable=false
-
-# ========================================= Cassandra BlobStore Cache ======================================
-# A cassandra cache can be enabled to reduce latency when reading small blobs frequently
-# A dedicated keyspace with a replication factor of one is then used
-# Cache eviction policy is TTL based
-# Only blobs below a given threshold will be stored.
-# Note that blobs are stored within a single Cassandra row, hence a low threshold should be used.
-
-# Enable the cache? Optional and default to false. Must be a boolean.
-cache.enable=false
-
-# Cache eviction policy is TTL based. Optional and defaults to 7 days. Must be a duration.
-# Valid units: ms, sec, min, hour, day, week, month, year
-# cache.cassandra.ttl=7days
-
-# Timeout after which this cache should be bypassed. Optional and defaults to 100ms. Cannot exceed 1 hour.
-# Must be a duration. Valid units: ms, sec, min, hour, day, week, month, year
-# cache.cassandra.timeout=100ms
-
-# Maximum size of stored objects expressed in bytes. Must be strictly positive. Defaults to 8192.
-# Units: bytes, KiB, MiB, GiB, TiB
-# cache.sizeThresholdInBytes=8 KiB
-
-# ============================================== ObjectStorage ============================================
-
-# ========================================= ObjectStorage Buckets ==========================================
-# Bucket name prefix
-# Optional, default is no prefix
-# objectstorage.bucketPrefix=prod-
-
-# Default bucket name
-# Optional, default is bucketPrefix + `default`
-objectstorage.namespace=${env:OS_NAMESPACE}
-
-
-# ========================================= ObjectStorage on S3 =============================================
-objectstorage.s3.region=${env:OS_S3_REGION}
-objectstorage.s3.endPoint=${env:OS_S3_ENDPOINT}
-objectstorage.s3.accessKeyId=${env:OS_S3_ACCESSKEYID}
-objectstorage.s3.secretKey=${env:OS_S3_SECRETKEY}
-
-# ============================================ Blobs Exporting ==============================================
-# Read https://james.apache.org/server/config-blob-export.html for further details
-
-# Choosing the blob exporting mechanism; allowed mechanisms are: localFile, linshare
-# LinShare is a file sharing service, explained in the section below
-# Optional, default is localFile
-blob.export.implementation=localFile
-
-# ======================================= Local File Blobs Exporting ========================================
-# Optional, directory to store exported blobs; the directory path follows the James file system format
-# default is file://var/blobExporting
-blob.export.localFile.directory=file://var/blobExporting
-
-# ======================================= LinShare File Blobs Exporting ========================================
-# LinShare is a file sharing service. James connects to an existing LinShare server and shares files with other mail
-# addresses, as long as those addresses are available in LinShare. For example, you can deploy James and LinShare
-# sharing the same LDAP repository
-# Mandatory if you choose LinShare: the URL to connect to the LinShare service
-# blob.export.linshare.url=http://linshare:8080
-
-# ======================================= LinShare Configuration BasicAuthentication ===================================
-# Authentication is mandatory if you choose LinShare; a technical account is needed to connect to the LinShare service.
-# For example, it will be formalized to 'Authorization: Basic {credentials of UUID/password}'
-
-# blob.export.linshare.technical.account.uuid=Technical_Account_UUID
-# blob.export.linshare.technical.account.password=password

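The ${env:...} placeholders above also explain the otherwise cryptic
'printenv | grep OS_ > env.file' step in the deleted Jenkinsfile: James
resolves these placeholders from its container environment at startup,
so the s3 profile simply forwarded the agent's OS_* variables into the
container. A minimal sketch of that wiring in pipeline form; the OS_*
names come from the two deleted files, while the values and the withEnv
wrapper are illustrative only:

    script {
        // Illustrative values; in the real job the OS_* variables came
        // from the Jenkins agent's environment.
        withEnv(['OS_NAMESPACE=james-bench',
                 'OS_S3_REGION=eu-west-1',
                 'OS_S3_ENDPOINT=http://s3.docker.test:8000',
                 'OS_S3_ACCESSKEYID=accessKey1',
                 'OS_S3_SECRETKEY=secretKey1']) {
            // Dump the OS_* variables into a file Docker can consume...
            sh 'printenv | grep OS_ > env.file'
            // ...and hand them to the container, where blob.properties
            // resolves ${env:OS_S3_REGION} and friends at startup.
            sh 'docker run -d --env-file env.file --name james_run -t james_run'
        }
    }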