You are viewing a plain text version of this content. The canonical link for it is here.
Posted to issues@hbase.apache.org by GitBox <gi...@apache.org> on 2020/06/13 20:04:37 UTC

[GitHub] [hbase] mattf-apache commented on a change in pull request #1620: HBASE-23339 Release scripts should use forwarded gpg-agent

mattf-apache commented on a change in pull request #1620:
URL: https://github.com/apache/hbase/pull/1620#discussion_r439580182



##########
File path: dev-support/create-release/README.txt
##########
@@ -37,15 +47,53 @@ $ sudo add-apt-repository -y \
    stable"
 $ sudo apt-get update
 $ sudo apt-get install -y docker-ce docker-ce-cli containerd.io
-$ sudo usermod -a -G docker $USERID
+# Follow the post installation steps: https://docs.docker.com/engine/install/linux-postinstall/
+$ sudo usermod -aG docker $USER
 # LOGOUT and then LOGIN again so $USERID shows as part of docker group
-# Copy up private key for $USERID export from laptop and import on gce.
-$ gpg --import stack.duboce.net.asc
-$ export GPG_TTY=$(tty) # https://github.com/keybase/keybase-issues/issues/2798
-$ eval $(gpg-agent --disable-scdaemon --daemon --no-grab  --allow-preset-passphrase --default-cache-ttl=86400 --max-cache-ttl=86400)
-$ export PROJECT="${PROJECT:-hbase}"
-$ git clone https://github.com/apache/${PROJECT}.git
-$ cd "${PROJECT}"
+# Test here by running docker's hello world as your build user
+$ docker run hello-world
+
+# Follow the GPG guide for forwarding your gpg-agent from your local machine to the VM
+#   https://wiki.gnupg.org/AgentForwarding
+# On the VM find out the location of the gpg agent socket and extra socket
+$ gpgconf --list-dir agent-socket
+/run/user/1000/gnupg/S.gpg-agent
+$ gpgconf --list-dir agent-extra-socket
+/run/user/1000/gnupg/S.gpg-agent.extra
+# On the VM configure sshd to remove stale sockets
+$ sudo bash -c 'echo "StreamLocalBindUnlink yes" >> /etc/ssh/sshd_config'
+$ sudo systemctl restart ssh
+# logout of the VM
+
+# Do these steps on your local machine.
+# Export your public key and copy it to the VM.
+# Assuming 'example.gce.host' maps to your VM's external IP (or use the IP)
+$ gpg --export example@apache.org > ~/gpg.example.apache.pub

Review comment:
       Yes, brilliant!

##########
File path: dev-support/create-release/README.txt
##########
@@ -17,13 +17,23 @@ anomalies are explained up in JIRA.
 
 See http://hbase.apache.org/book.html#maven.release
 
+Before starting an RC build, make sure your local gpg-agent has configs
+to properly handle your credentials, especially if you want to avoid
+typing the passphrase to your secret key.
+
+e.g. if you are going to run and step away, best to increase the TTL
+on caching the unlocked secret via ~/.gnupg/gpg-agent.conf
+  # in seconds, e.g. a day
+  default-cache-ttl 86400
+  max-cache-ttl 86400
+
 Running a build on GCE is easy enough. Here are some notes if of use.
 Create an instance. 4CPU/15G/10G disk seems to work well enough.
 Once up, run the below to make your machine fit for RC building:
 
-# Presuming debian-compatible OS
-$ sudo apt-get install -y git openjdk-8-jdk maven gnupg gnupg-agent

Review comment:
       Nice minimization. The JDK is indeed unneeded on the host, unless the user for some reason chooses to use the `-j` option. And maven is not needed.

##########
File path: dev-support/create-release/do-release.sh
##########
@@ -42,27 +43,41 @@ fi
 
 # If running in docker, import and then cache keys.
 if [ "$RUNNING_IN_DOCKER" = "1" ]; then
-  # Run gpg agent.
-  eval "$(gpg-agent --disable-scdaemon --daemon --no-grab  --allow-preset-passphrase \
-          --default-cache-ttl=86400 --max-cache-ttl=86400)"
-  echo "GPG Version: $(gpg --version)"
-  # Inside docker, need to import the GPG keyfile stored in the current directory.
-  # (On workstation, assume GPG has access to keychain/cache with key_id already imported.)
-  echo "$GPG_PASSPHRASE" | $GPG --passphrase-fd 0 --import "$SELF/gpg.key"
+  # when Docker Desktop for mac is running under load there is a delay before the mounted volume
+  # becomes available. if we do not pause then we may try to use the gpg-agent socket before docker
+  # has got it ready and we will not think there is a gpg-agent.
+  if [ "${HOST_OS}" == "DARWIN" ]; then
+    sleep 5
+  fi
+  # in docker our working dir is set to where all of our scripts are held
+  # and we want default output to go into the "output" directory that should be in there.
+  if [ -d "output" ]; then
+    cd output
+  fi
+  echo "GPG Version: $("${GPG}" "${GPG_ARGS[@]}" --version)"
+  # Inside docker, need to import the GPG key stored in the current directory.
+  $GPG "${GPG_ARGS[@]}" --import "$SELF/gpg.key.public"
 
   # We may need to adjust the path since JAVA_HOME may be overridden by the driver script.
   if [ -n "$JAVA_HOME" ]; then
+    echo "Using JAVA_HOME from host."
     export PATH="$JAVA_HOME/bin:$PATH"
   else
     # JAVA_HOME for the openjdk package.
-    export JAVA_HOME=/usr
+    export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/
   fi
 else
   # Outside docker, need to ask for information about the release.
   get_release_info
 fi
+
 GPG_TTY="$(tty)"
 export GPG_TTY
+echo "Testing gpg signing."
+echo "foo" > gpg_test.txt
+"${GPG}" "${GPG_ARGS[@]}" --detach --armor --sign gpg_test.txt
+# In --batch mode we have to be explicit about what we are verifying
+"${GPG}" "${GPG_ARGS[@]}" --verify gpg_test.txt.asc gpg_test.txt

Review comment:
       Good.

##########
File path: dev-support/create-release/release-util.sh
##########
@@ -381,8 +383,6 @@ function configure_maven {
       <password>${env.ASF_PASSWORD}</password></server>
     <server><id>apache.releases.https</id><username>${env.ASF_USERNAME}</username>
       <password>${env.ASF_PASSWORD}</password></server>
-    <server><id>gpg.passphrase</id>
-      <passphrase>${env.GPG_PASSPHRASE}</passphrase></server>

Review comment:
       @ndimiduk , do note this was NOT writing the passphrase literally into the maven settings.xml file.
   That got taken care of already; it is just a reference to the environment variable which held the passphrase.

##########
File path: dev-support/create-release/README.txt
##########
@@ -37,15 +47,53 @@ $ sudo add-apt-repository -y \
    stable"
 $ sudo apt-get update
 $ sudo apt-get install -y docker-ce docker-ce-cli containerd.io
-$ sudo usermod -a -G docker $USERID
+# Follow the post installation steps: https://docs.docker.com/engine/install/linux-postinstall/
+$ sudo usermod -aG docker $USER
 # LOGOUT and then LOGIN again so $USERID shows as part of docker group
-# Copy up private key for $USERID export from laptop and import on gce.
-$ gpg --import stack.duboce.net.asc
-$ export GPG_TTY=$(tty) # https://github.com/keybase/keybase-issues/issues/2798
-$ eval $(gpg-agent --disable-scdaemon --daemon --no-grab  --allow-preset-passphrase --default-cache-ttl=86400 --max-cache-ttl=86400)
-$ export PROJECT="${PROJECT:-hbase}"
-$ git clone https://github.com/apache/${PROJECT}.git
-$ cd "${PROJECT}"
+# Test here by running docker's hello world as your build user
+$ docker run hello-world
+
+# Follow the GPG guide for forwarding your gpg-agent from your local machine to the VM
+#   https://wiki.gnupg.org/AgentForwarding
+# On the VM find out the location of the gpg agent socket and extra socket
+$ gpgconf --list-dir agent-socket
+/run/user/1000/gnupg/S.gpg-agent
+$ gpgconf --list-dir agent-extra-socket
+/run/user/1000/gnupg/S.gpg-agent.extra
+# On the VM configure sshd to remove stale sockets
+$ sudo bash -c 'echo "StreamLocalBindUnlink yes" >> /etc/ssh/sshd_config'
+$ sudo systemctl restart ssh
+# logout of the VM
+
+# Do these steps on your local machine.
+# Export your public key and copy it to the VM.
+# Assuming 'example.gce.host' maps to your VM's external IP (or use the IP)
+$ gpg --export example@apache.org > ~/gpg.example.apache.pub
+$ scp ~/gpg.example.apache.pub example.gce.host:
+# ssh into the VM while forwarding the remote gpg socket locations found above to your local
+#   gpg-agent's extra socket (this will restrict what commands the remote node is allowed to have
+#   your agent handle. Note that the gpg guide above can help you set this up in your ssh config
+#   rather than typing it in ssh like this every time.)
+$ ssh -i ~/.ssh/my_id \
+    -R "/run/user/1000/gnupg/S.gpg-agent:$(gpgconf --list-dir agent-extra-socket)" \
+    -R "/run/user/1000/gnupg/S.gpg-agent.extra:$(gpgconf --list-dir agent-extra-socket)" \
+    example.gce.host

Review comment:
       @busbey , I'm not sure this is an issue, I'm just going by the docs.  However:
   Does this work correctly if gpg-agent is not already running on my local machine, given that the remote invocation of gpg on line 84 uses `--no-autostart`?
  In other words, should the instructions include something around line 72 that will guarantee the local gpg-agent is running, such as signing a foo file locally too?

##########
File path: dev-support/create-release/do-release.sh
##########
@@ -17,6 +17,7 @@
 # limitations under the License.
 #
 
+set -e

Review comment:
       No harm, but it is set for you on line 29 when release-util.sh is sourced.
   Maybe it's good to have it evident here too.

##########
File path: dev-support/create-release/hbase-rm/Dockerfile
##########
@@ -50,10 +50,15 @@ RUN wget -qO- "https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&fi
         tar xvz -C /opt
 ENV YETUS_HOME /opt/apache-yetus-${YETUS_VERSION}
 
-WORKDIR /opt/hbase-rm/output
-
 ARG UID
-RUN useradd -m -s /bin/bash -p hbase-rm -u $UID hbase-rm
-USER hbase-rm:hbase-rm
+ARG RM_USER
+RUN groupadd hbase-rm && \
+    useradd --create-home --shell /bin/bash -p hbase-rm -u $UID $RM_USER && \
+    mkdir /home/$RM_USER/.gnupg && \
+    chown -R $RM_USER:hbase-rm /home/$RM_USER && \
+    chmod -R 700 /home/$RM_USER
+
+USER $RM_USER:hbase-rm
+WORKDIR /home/$RM_USER/hbase-rm/

Review comment:
       Nice improvement. Was this mandatory for the ssh tunneled gpg-agent to work?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org