Posted to commits@bigtop.apache.org by yw...@apache.org on 2019/10/14 03:00:26 UTC

[bigtop] branch cnb updated: BIGTOP-3238: Local multi-node cluster via Vagrant and Kubespray

This is an automated email from the ASF dual-hosted git repository.

ywkim pushed a commit to branch cnb
in repository https://gitbox.apache.org/repos/asf/bigtop.git


The following commit(s) were added to refs/heads/cnb by this push:
     new dccbc42  BIGTOP-3238: Local multi-node cluster via Vagrant and Kubespray
dccbc42 is described below

commit dccbc42270058fe29af5223bf8e6b642320be469
Author: Youngwoo Kim <yw...@apache.org>
AuthorDate: Mon Oct 14 10:29:58 2019 +0900

    BIGTOP-3238: Local multi-node cluster via Vagrant and Kubespray
---
 README.md                     |  38 +++++++
 bigtop.bom                    |  12 +++
 build.gradle                  |   2 +-
 kubespray/vagrant/Vagrantfile | 227 ++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 278 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 5bec287..0db5be6 100755
--- a/README.md
+++ b/README.md
@@ -1,3 +1,41 @@
+[![Travis CI](https://img.shields.io/travis/apache/bigtop.svg?branch=master)](https://travis-ci.org/apache/bigtop)
+
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+[Apache Bigtop](http://bigtop.apache.org/)
+==========================================
+
+TBD
+
+# Get Started with Deployment and Smoke Testing of Cloud Native Bigtop
+
+Prerequisites:
+- Vagrant
+- Ansible (the Vagrantfile below uses Vagrant's host-side Ansible provisioner)
+- Java
+
+## Set up a 3-node Kubernetes cluster via Kubespray on the local machine
+```
+$ cd $BIGTOP_HOME
+$ ./gradlew kubespray-clean kubespray-download && cd dl/ && tar xvfz kubespray-2.11.0.tar.gz
+$ cd kubespray-2.11.0/ && cp ../../kubespray/vagrant/Vagrantfile .
+$ vagrant up
+```
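+
+Once `vagrant up` completes, Kubespray's `cluster.yml` playbook has run against
+all three nodes. With the defaults in the bundled Vagrantfile
+(`kubeconfig_localhost: True`), the admin kubeconfig should end up under
+`inventory/sample/artifacts/`; a minimal smoke check from the host, assuming
+`kubectl` is installed locally:
+```
+$ export KUBECONFIG=$(pwd)/inventory/sample/artifacts/admin.conf
+$ kubectl get nodes    # expect k8s-1, k8s-2 and k8s-3 reporting Ready
+```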
+
+# Cloud Native Bigtop
 This is the content for the talk given by Jay Vyas and Sid Mani at ApacheCon 2019 in Las Vegas; you can watch the recording at https://www.youtube.com/watch?v=LUCE63q
 
 # TL;DR: here's how you create an analytics distro on K8s...
diff --git a/bigtop.bom b/bigtop.bom
index bafca95..99ab5c8 100644
--- a/bigtop.bom
+++ b/bigtop.bom
@@ -122,6 +122,18 @@ bigtop {
   ]
 
   components {
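+    // Registering kubespray as a BOM component hooks it into Bigtop's generated
+    // Gradle tasks: `./gradlew kubespray-download` fetches v2.11.0.tar.gz from the
+    // GitHub archive URL below into dl/, as used in the README instructions.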
+    'kubespray' {
+      name    = "kubespray"
+      pkg     = "kubespray"
+      relNotes = ""
+      website = "https://github.com/kubernetes-sigs/kubespray"
+      version { base = '2.11.0'; pkg = base; release = 1 }
+      tarball { destination = "$name-${version.base}.tar.gz"
+                source      = "v${version.base}.tar.gz" }
+      url     { site = "https://github.com/kubernetes-sigs/kubespray/archive/"
+                archive = site }
+    }
+
     'zookeeper' {
       name    = 'zookeeper'
       pkg     = name
diff --git a/build.gradle b/build.gradle
index 8dc9095..a9065fd 100644
--- a/build.gradle
+++ b/build.gradle
@@ -23,7 +23,7 @@ buildscript {
 }
 
 plugins {
-  id "de.undercouch.download" version "3.2.0"
+  id "de.undercouch.download" version "4.0.0"
   id "org.nosphere.apache.rat" version "0.2.0"
 }
 
diff --git a/kubespray/vagrant/Vagrantfile b/kubespray/vagrant/Vagrantfile
new file mode 100644
index 0000000..9702059
--- /dev/null
+++ b/kubespray/vagrant/Vagrantfile
@@ -0,0 +1,227 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# For help on using kubespray with vagrant, check out docs/vagrant.md
+
+require 'fileutils'
+
+Vagrant.require_version ">= 2.0.0"
+
+CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
+
+COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
+
+# Unique disk UUID for libvirt
+DISK_UUID = Time.now.utc.to_i
+
+SUPPORTED_OS = {
+  "coreos-stable"       => {box: "coreos-stable",      user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
+  "coreos-alpha"        => {box: "coreos-alpha",       user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
+  "coreos-beta"         => {box: "coreos-beta",        user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
+  "ubuntu1604"          => {box: "generic/ubuntu1604", user: "vagrant"},
+  "ubuntu1804"          => {box: "generic/ubuntu1804", user: "vagrant"},
+  "centos"              => {box: "centos/7",           user: "vagrant"},
+  "centos-bento"        => {box: "bento/centos-7.6",   user: "vagrant"},
+  "fedora"              => {box: "fedora/28-cloud-base",                user: "vagrant"},
+  "opensuse"            => {box: "opensuse/openSUSE-15.0-x86_64",       user: "vagrant"},
+  "opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", user: "vagrant"},
+  "oraclelinux"         => {box: "generic/oracle7", user: "vagrant"},
+}
+
+# Defaults for config options defined in CONFIG
+$num_instances = 3
+$instance_name_prefix = "k8s"
+$vm_gui = false
+$vm_memory = 2048
+$vm_cpus = 1
+$shared_folders = {}
+$forwarded_ports = {}
+$subnet = "172.17.8"
+$os = "centos"
+$network_plugin = "flannel"
+# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
+$multi_networking = false
+# All nodes are etcd servers
+$etcd_instances = $num_instances
+# All nodes except the last are kube masters (two of the default three)
+$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
+# All nodes are kube nodes
+$kube_node_instances = $num_instances
+# The following only works when using the libvirt provider
+$kube_node_instances_with_disks = false
+$kube_node_instances_with_disks_size = "20G"
+$kube_node_instances_with_disks_number = 2
+$override_disk_size = false
+$disk_size = "20GB"
+$local_path_provisioner_enabled = false
+$local_path_provisioner_claim_root = "/opt/local-path-provisioner/"
+
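+# cluster.yml is kubespray's main deployment playbook; the Ansible provisioner
+# below runs it once, after the last VM has booted.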
+$playbook = "cluster.yml"
+
+host_vars = {}
+
+if File.exist?(CONFIG)
+  require CONFIG
+end
+
+$box = SUPPORTED_OS[$os][:box]
+# if $inventory is not set, try to use example
+$inventory = "inventory/sample" if ! $inventory
+$inventory = File.absolute_path($inventory, File.dirname(__FILE__))
+
+# if $inventory has a hosts.ini file use it, otherwise copy over
+# vars etc to where vagrant expects dynamic inventory to be
+if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
+  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
+  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
+  if ! File.exist?(File.join($vagrant_ansible,"inventory"))
+    FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
+  end
+end
+
+if Vagrant.has_plugin?("vagrant-proxyconf")
+    $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
+    (1..$num_instances).each do |i|
+        $no_proxy += ",#{$subnet}.#{i+100}"
+    end
+end
+
+Vagrant.configure("2") do |config|
+
+  config.vm.box = $box
+  if SUPPORTED_OS[$os].has_key? :box_url
+    config.vm.box_url = SUPPORTED_OS[$os][:box_url]
+  end
+  config.ssh.username = SUPPORTED_OS[$os][:user]
+
+  # Work around a conflict with the vagrant-vbguest plugin: disable its auto-update
+  if Vagrant.has_plugin?("vagrant-vbguest") then
+    config.vbguest.auto_update = false
+  end
+
+  # Always use Vagrant's insecure key
+  config.ssh.insert_key = false
+
+  if ($override_disk_size)
+    unless Vagrant.has_plugin?("vagrant-disksize")
+      system "vagrant plugin install vagrant-disksize"
+    end
+    config.disksize.size = $disk_size
+  end
+
+  (1..$num_instances).each do |i|
+    config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node|
+
+      node.vm.hostname = vm_name
+
+      if Vagrant.has_plugin?("vagrant-proxyconf")
+        node.proxy.http     = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
+        node.proxy.https    = ENV['HTTPS_PROXY'] || ENV['https_proxy'] ||  ""
+        node.proxy.no_proxy = $no_proxy
+      end
+
+      ["vmware_fusion", "vmware_workstation"].each do |vmware|
+        node.vm.provider vmware do |v|
+          v.vmx['memsize'] = $vm_memory
+          v.vmx['numvcpus'] = $vm_cpus
+        end
+      end
+
+      node.vm.provider :virtualbox do |vb|
+        vb.memory = $vm_memory
+        vb.cpus = $vm_cpus
+        vb.gui = $vm_gui
+        vb.linked_clone = true
+        vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
+      end
+
+      node.vm.provider :libvirt do |lv|
+        lv.memory = $vm_memory
+        lv.cpus = $vm_cpus
+        lv.default_prefix = 'kubespray'
+        # Fix kernel panic on fedora 28
+        if $os == "fedora"
+          lv.cpu_mode = "host-passthrough"
+        end
+      end
+
+      if $kube_node_instances_with_disks
+        # Libvirt
+        driveletters = ('a'..'z').to_a
+        node.vm.provider :libvirt do |lv|
+          # always make /dev/sd{a/b/c} so that CI can ensure that
+          # virtualbox and libvirt will have the same devices to use for OSDs
+          (1..$kube_node_instances_with_disks_number).each do |d|
+            lv.storage :file, :device => "hd#{driveletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "ide"
+          end
+        end
+      end
+
+      if $expose_docker_tcp
+        node.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
+      end
+
+      $forwarded_ports.each do |guest, host|
+        node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
+      end
+
+      node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
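+      # This Vagrantfile is copied into dl/kubespray-2.11.0/ (see the README), so
+      # "../../" is the Bigtop source tree; mount it at /bigtop inside every VM.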
+      node.vm.synced_folder "../../", "/bigtop", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
+
+      $shared_folders.each do |src, dst|
+        node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
+      end
+
+      ip = "#{$subnet}.#{i+100}"
+      node.vm.network :private_network, ip: ip
+
+      # Disable swap for each vm
+      node.vm.provision "shell", inline: "swapoff -a"
+
+      host_vars[vm_name] = {
+        "ip": ip,
+        "flannel_interface": "eth1",
+        "kube_network_plugin": $network_plugin,
+        "kube_network_plugin_multus": $multi_networking,
+        "download_run_once": "True",
+        "download_localhost": "False",
+        "download_cache_dir": ENV['HOME'] + "/kubespray_cache",
+        # Make kubespray cache even when download_run_once is false
+        "download_force_cache": "True",
+        # Keeping the cache on the nodes can improve provisioning speed while debugging kubespray
+        "download_keep_remote_cache": "False",
+        "docker_keepcache": "1",
+        # These two settings will put kubectl and admin.conf in $inventory/artifacts
+        "kubeconfig_localhost": "True",
+        "kubectl_localhost": "True",
+        "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
+        "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
+        "ansible_ssh_user": SUPPORTED_OS[$os][:user]
+      }
+
+      # Only execute the Ansible provisioner once, when all the machines are up and ready.
+      if i == $num_instances
+        node.vm.provision "ansible" do |ansible|
+          ansible.playbook = $playbook
+          $ansible_inventory_path = File.join($inventory, "hosts.ini")
+          if File.exist?($ansible_inventory_path)
+            ansible.inventory_path = $ansible_inventory_path
+          end
+          ansible.become = true
+          ansible.limit = "all"
+          ansible.host_key_checking = false
+          ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
+          ansible.host_vars = host_vars
+          #ansible.tags = ['download']
+          ansible.groups = {
+            "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
+            "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
+            "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
+            "k8s-cluster:children" => ["kube-master", "kube-node"],
+          }
+        end
+      end
+
+    end
+  end
+end