Commit 76718c3a authored by John Hackett

Initial commit: Terraform provisions an LXD container locally and Ansible configures it

debian-9.qcow2
.terraform/
terraform.tfstate*
playbook.retry
virt-builder debian-9 --root-password password:super-secret-password-123 --format qcow2
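Presumably this image is meant to be imported into OpenNebula once the services are up; a minimal sketch using the same oneimage flags the playbook below uses for boot2docker (the image name, datastore ID 1, and the path are assumptions):

# Sketch: register the virt-builder output as an OS image in datastore 1.
# Name and path are illustrative, not taken from this commit.
oneimage create -d 1 --name debian-9 --path ./debian-9.qcow2 \
  --type OS --driver qcow2 --description "Debian 9 built with virt-builder"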
NAME = "boot2docker"
CONTEXT = [
NETWORK = "YES",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]" ]
CPU = "0.1"
DISK = [
IMAGE_ID = "0" ]
GRAPHICS = [
LISTEN = "0.0.0.0",
TYPE = "vnc" ]
LOGO = "images/logos/linux.png"
MEMORY = "512"
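A hedged sketch of how this template is used: the playbook below registers it with onetemplate create /tmp/boot2docker.tmpl; instantiating it afterwards as oneadmin would look like this (the VM name is illustrative):

# Register the VM template, then boot a VM from it.
onetemplate create /tmp/boot2docker.tmpl
onetemplate instantiate boot2docker --name docker-test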
# Configuration attributes (fw driver)
NAME        = "public"
DESCRIPTION = "A public network for IP addresses"
VN_MAD      = "fw"
PHYDEV      = "eth0"
CLUSTERS = [
  ID = 0
]
PERMISSIONS = [
  OWNER_U = "1",
  OWNER_M = "1",
  OWNER_A = "0",
  GROUP_U = "1",
  GROUP_M = "1",
  GROUP_A = "0",
  OTHER_U = "0",
  OTHER_M = "0",
  OTHER_A = "0"
]
# Context attributes
NETWORK_ADDRESS = "192.168.0.0"
NETWORK_MASK    = "255.255.0.0"
DNS             = "192.168.0.1"
GATEWAY         = "192.168.3.1"
# Address ranges; only these addresses will be assigned to the VMs
AR = [ TYPE = "IP4", IP = "192.168.3.5", SIZE = "50" ]
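A hedged usage sketch for the network template above: the playbook below registers its VLAN counterpart with onevnet create from /tmp; done by hand, with an illustrative filename, that would be:

# Register the virtual network and confirm its address range.
onevnet create public.tmpl   # the playbook uses /tmp/vlan_public.tmpl
onevnet show public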
# Interface definition based upon the default vagrant setup of 192.168.121.1/24 for its interfaces
auto onebr0
iface onebr0 inet dhcp
    bridge_ports ens6
    bridge_stp off
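Once networking restarts with this file in place, the bridge can be checked with the bridge-utils tools the playbook installs; a quick sanity check:

# Confirm onebr0 exists, has ens6 enslaved, and picked up a DHCP lease.
brctl show onebr0
ip addr show onebr0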
oneadmin:password
<network ipv6='yes'>
  <name>opennebula</name>
  <uuid>642b1aad-fcc9-4f2c-9d34-f0c0994d0cf6</uuid>
  <forward mode='nat'/>
  <bridge name='onebr0' stp='on' delay='0'/>
  <mac address='52:54:00:2c:26:64'/>
  <ip address='192.168.5.1' netmask='255.255.255.0'/>
</network>
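This is the definition the playbook's commented-out "libvirt network" handler loads with virsh net-create; doing it by hand and verifying looks like:

# Define the transient libvirt network and list it (path as in the handler).
virsh net-create /etc/libvirt/qemu/networks/opennebula.xml
virsh net-list --all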
# Configuration attributes (Open vSwitch driver)
NAME        = "ovs_public"
DESCRIPTION = "An OVS public network for IP addresses"
VN_MAD      = "ovswitch"
BRIDGE      = "onebr0"
CLUSTERS = [
  ID = 0
]
VLAN_IDS = [
  START    = "10",
  RESERVED = "0, 1, 4095"
]
PERMISSIONS = [
  OWNER_U = "1",
  OWNER_M = "1",
  OWNER_A = "0",
  GROUP_U = "1",
  GROUP_M = "1",
  GROUP_A = "0",
  OTHER_U = "0",
  OTHER_M = "0",
  OTHER_A = "0"
]
# Context attributes
NETWORK_ADDRESS = "10.0.24.0"
NETWORK_MASK    = "255.255.255.0"
DNS             = "192.168.0.1"
GATEWAY         = "10.0.24.1"
# Address ranges; only these addresses will be assigned to the VMs
AR = [ TYPE = "IP4", IP = "10.0.24.5", SIZE = "150" ]
provision_logo: images/opennebula-5.0.png
enabled_tabs:
    - provision-tab
    - settings-tab
features:
    # True to show showback monthly reports, and VM cost
    showback: true
    # Allows to change the security groups for each network interface
    # on the VM creation dialog
    secgroups: true
    # True to hide the CPU setting in the VM creation dialog. The CPU setting
    # will be set to the same value as VCPU, which will still be visible to the
    # end users
    instantiate_hide_cpu: true
tabs:
    provision-tab:
        panel_tabs:
            vm_info_tab: false
            vm_capacity_tab: false
            vm_storage_tab: false
            vm_network_tab: false
            vm_snapshot_tab: false
            vm_placement_tab: false
            vm_actions_tab: false
            vm_conf_tab: false
            vm_template_tab: false
            vm_log_tab: false
        # provision_tabs:
        #     flows: true
        #     templates: true
        actions: &provisionactions
            Template.chmod: false
            # In the cloud view, delete is the equivalent
            # of 'onetemplate delete --recursive'
            Template.delete: true
            VM.rename: true
            VM.resume: true
            VM.reboot: true
            VM.reboot_hard: true
            VM.poweroff: true
            VM.poweroff_hard: true
            VM.undeploy: false
            VM.undeploy_hard: false
            VM.terminate: true
            VM.terminate_hard: true
            VM.resize: false
            VM.attachdisk: false
            VM.detachdisk: false
            VM.disk_saveas: false
            VM.attachnic: false
            VM.detachnic: false
            VM.snapshot_create: false
            VM.snapshot_revert: false
            VM.snapshot_delete: false
            VM.disk_snapshot_create: false
            VM.disk_snapshot_revert: false
            VM.disk_snapshot_delete: false
            VM.save_as_template: true
        dashboard:
            # Connected user's quotas
            quotas: true
            # Overview of connected user's VMs
            vms: true
            # Group's quotas
            groupquotas: false
            # Overview of group's VMs
            groupvms: false
        create_vm:
            # True to allow capacity (CPU, MEMORY, VCPU) customization
            capacity_select: true
            # True to allow NIC customization
            network_select: true
            # True to allow DISK size customization
            disk_resize: true
    settings-tab:
        panel_tabs:
            settings_info_tab: false
            settings_config_tab: true
            settings_quotas_tab: true
            settings_accounting_tab: true
            settings_showback_tab: true
        actions:
            # Buttons for settings_info_tab
            User.update_password: true
            User.login_token: true
            # Buttons for settings_config_tab
            Settings.change_language: true
            Settings.change_password: true
            Settings.change_view: true
            Settings.ssh_key: true
            Settings.login_token: true
            # Edit button in settings_quotas_tab
            User.quotas_dialog: false
    vms-tab:
        actions: *provisionactions
    images-tab:
        table_columns:
            - 0         # Checkbox
            - 1         # ID
            - 2         # Owner
            - 3         # Group
            - 4         # Name
            - 5         # Datastore
            #- 6        # Size
            - 7         # Type
            #- 8        # Registration time
            #- 9        # Persistent
            - 10        # Status
            - 11        # #VMs
            #- 12       # Target
    vnets-tab:
        table_columns:
            - 0         # Checkbox
            - 1         # ID
            - 2         # Owner
            #- 3        # Group
            - 4         # Name
            #- 5        # Reservation
            #- 6        # Cluster
            #- 7        # Bridge
            #- 8        # Leases
            #- 9        # VLAN ID
    secgroups-tab:
        table_columns:
            - 0         # Checkbox
            - 1         # ID
            - 2         # Owner
            #- 3        # Group
            - 4         # Name
            #- 5        # Labels
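This view only takes effect once it is installed where Sunstone looks for it; the playbook below has a commented-out task that copies it into place. Done by hand, with the paths from that task, a sketch:

# Install the customised cloud view and restart Sunstone to pick it up.
cp files/sunstone-views/cloud.yaml /etc/one/sunstone-views/cloud.yaml
systemctl restart opennebula-sunstone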
# Configuration attributes (802.1Q driver)
NAME    = "public network"
VN_MAD  = "802.1Q"
PHYDEV  = "eth0"
CLUSTERS = [
  ID = 0
]
VLAN_IDS = [
  START    = "10",
  RESERVED = "0, 1, 4095"
]
PERMISSIONS = [
  OWNER_U = "1",
  OWNER_M = "1",
  OWNER_A = "0",
  GROUP_U = "1",
  GROUP_M = "1",
  GROUP_A = "0",
  OTHER_U = "0",
  OTHER_M = "0",
  OTHER_A = "0"
]
# Context attributes
NETWORK_ADDRESS = "10.0.24.0"
NETWORK_MASK    = "255.255.255.0"
DNS             = "192.168.0.1"
GATEWAY         = "10.0.24.1"
# Address ranges; only these addresses will be assigned to the VMs
AR = [ TYPE = "IP4", IP = "10.0.24.5", SIZE = "150" ]
provider "lxd" {
}
variable "ssh_public_key" {
default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCltYpJjBXD99GIoM7fukdMdi5OXAdf32M4xNrg9kQwygOKV7CMAyoZgMm+Y7Y2wvderLoGx0Nd3cjXNhBJMj+SuSoL36c3/VyoICOATR0IWaavpmUWxhFa+fQEQTMaUTA4ngtVUlEoZyY+9NwK+zHy7cFBVw4zDl20aLswFqpCms9kAxl9HVCOI/j1wYP0m/vHEjf/s42KvX9EVbRUZ+ZwYnNxAjVx/XxecF4tTSfZk7Sw4D9stBHqSBxoKWCpIQwtSgz6AzvnS0eDtf3/75NDOhbCU6JEa9OTFcTa6GeiZzzAK3NxqOq62DxSpXZaQT0D501+AySCjxxUBZ3ZDyL1kcThdtMNRUtZKlniVBQOmqC7aZpt6x31GJmTeA5y7cufwihC88ZEockN4y5N8LDxxeMBB9w2TtU8dfTFab54bxJZj1+3jigBzp5IUJXgk2iIliV8DB9t6jUPEkS+lXREcHeaBpv0rKH/N5lhJh5V+wWgKhee7vU74XwnJW3XlYeu7+r6pHeCBUuWGPE6ohZVx3BNF2FIXp5S/wJFwsHHACffQcNLq7iMmxt2q7VKaz1V9KnW8vmeYe540YmDbDrQQwAeo4N+2VfMrBk0lOx3zoYLcaizAx/ubd64BUdj5J0cdVKCR8hrFGe8tjBIvDQg/BT0zb5pZV73V0YSiLKvyQ=="
}
resource "lxd_container" "opennebula" {
name = "opennebula"
image = "images:debian/stretch"
device {
name = "vol1"
type = "disk"
properties {
path = "/"
pool = "${lxd_storage_pool.opennebula.name}"
}
}
profiles = ["${lxd_profile.opennebula.name}"]
provisioner "local-exec" {
command = "lxc exec ${lxd_container.opennebula.name} -- apt-get -y install gnupg python avahi-daemon openssh-server && lxc exec ${lxd_container.opennebula.name} -- systemctl start sshd && lxc exec ${lxd_container.opennebula.name} -- systemctl start avahi-daemon"
}
provisioner "local-exec" {
command = "lxc exec ${lxd_container.opennebula.name} -- bash -c 'mkdir -p /root/.ssh/ && echo ${var.ssh_public_key} > /root/.ssh/authorized_keys && chmod 600 /root/.ssh/authorized_keys && chmod 700 /root/.ssh'"
}
provisioner "local-exec" {
command = "export ANSIBLE_HOST_KEY_CHECKING=False && ansible-playbook -i ${lxd_container.opennebula.name}.local, playbook.yml -u root"
}
}
resource "lxd_profile" "opennebula" {
name = "opennebula"
device {
name = "wlp58s0"
type = "nic"
properties {
nictype = "bridged"
parent = "${lxd_network.opennebula.name}"
}
}
}
resource "lxd_storage_pool" "opennebula" {
name = "%s"
driver = "dir"
config {
source = "/var/lib/lxd/storage-pools/opennebula"
}
}
resource "lxd_network" "opennebula" {
name = "opennebula"
config {
ipv4.address = "10.0.25.1/24"
ipv4.nat = "true"
bridge.driver = "openvswitch"
}
}
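A hedged run-through of the Terraform side: the provisioners assume the lxc client and ansible-playbook are on the PATH and that the host resolves the container's Avahi name (opennebula.local). With the Terraform 0.11-era syntax used above:

# Fetch the lxd provider, then create the network, pool, profile and container;
# the local-exec provisioners bootstrap SSH and hand over to ansible.
terraform init
terraform apply
# Tear everything down again when finished.
terraform destroy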
---
- hosts: all
  become: true
  tasks:
    - name: install opennebula key
      apt_key: url=http://downloads.opennebula.org/repo/Debian/repo.key state=present
    - name: install opennebula repo
      apt_repository:
        repo: deb http://downloads.opennebula.org/repo/5.4/Debian/9 stable opennebula
        state: present
    - name: install opennebula
      apt: name={{ item }} state=installed
      with_items:
        - memcached
        - opennebula-sunstone
        - opennebula-gate
        - opennebula-flow
        - opennebula-node
        - opennebula
        - libsqlite3-dev
        - make
        - ruby-dev
        - libcurl3-dev
        - libxml2-dev
        - gcc
        - g++
        - avahi-daemon
        - ifupdown-extra
        - acl
        - tcpdump
        - openvswitch-switch
    # - name: create bridge
    #   openvswitch_bridge:
    #     bridge: onebr0
    #     state: present
    - name: Set opennebula password
      copy:
        src: ./files/one_auth
        dest: /var/lib/one/.one/one_auth
        owner: oneadmin
        group: oneadmin
        mode: 0600
    - name: install gems
      command: /usr/share/one/install_gems --yes
    # - name: copy cloudview config into place
    #   copy:
    #     src: 'files/sunstone-views/cloud.yaml'
    #     dest: '/etc/one/sunstone-views/cloud.yaml'
    #     owner: root
    #     group: root
    #     mode: 0644
    # - name: set federation master
    #   file:
    #     path: /tmp/foobar
    #     state: touch
    #   delegate_to: "one"
    - name: start services
      systemd:
        state: started
        name: "{{ item }}"
      with_items:
        - opennebula
        - opennebula-sunstone
    - name: ssh authorized_keys
      shell: "ssh-keygen -y -f /var/lib/one/.ssh/id_rsa > /var/lib/one/.ssh/authorized_keys"
      become_user: oneadmin
    - name: ssh keyscan (127.0.0.1)
      shell: "ssh-keyscan -H 127.0.0.1 >> /var/lib/one/.ssh/known_hosts"
      become_user: oneadmin
    - name: ssh keyscan (local hostname)
      shell: "ssh-keyscan -H {{ ansible_hostname }} >> /var/lib/one/.ssh/known_hosts"
      become_user: oneadmin
    - name: ssh keyscan (localhost)
      shell: "ssh-keyscan -H localhost >> /var/lib/one/.ssh/known_hosts"
      become_user: oneadmin
    - name: install libvirt
      apt: name={{ item }} state=installed
      with_items:
        - libvirt-daemon
        - libvirt-clients
    - name: sleep long enough for sunstone to start
      wait_for:
        port: 9869
        delay: 5
    - name: add hosts
      command: "onehost create 127.0.0.1 -i kvm -v kvm"
      become_user: oneadmin
      register: host_create
      failed_when: host_create.rc == 1
    - name: sleep 3
      pause: seconds=3
    - name: copy configuration templates
      copy:
        src: "files/{{ item }}"
        dest: "/tmp/{{ item }}"
      with_items:
        - boot2docker.tmpl
        - vlan_public.tmpl
        - interface.onebr0
        - opennebula-bridge.xml
    - name: download boot2docker image
      command: "wget http://marketplace.opennebula.systems/appliance/56d073858fb81d0315000002/download/0 -O /var/tmp/boot2docker"
      become_user: oneadmin
      register: app_download
      failed_when: app_download.rc == 1
    - name: sleep 3
      pause: seconds=3
    - name: add boot2docker image
      command: "oneimage create -d 1 --name boot2docker --path /var/tmp/boot2docker --type OS --prefix vd --driver raw --description \"boot2docker, OpenNebula contextualised\""
      become_user: oneadmin
      register: add_boot2docker
      failed_when: add_boot2docker.rc == 1
    - name: set open permissions on boot2docker image
      command: "oneimage chmod boot2docker 744"
      become_user: oneadmin
      register: chmod_boot2docker
      failed_when: chmod_boot2docker.rc == 1
    - name: create boot2docker template
      command: "onetemplate create /tmp/boot2docker.tmpl"
      become_user: oneadmin
      register: add_boot2docker_template
      failed_when: add_boot2docker_template.rc == 1
    - name: set open permissions on boot2docker template
      command: "onetemplate chmod boot2docker 744"
      become_user: oneadmin
      register: chmod_boot2docker_template
      failed_when: chmod_boot2docker_template.rc == 1
    - name: create public network template
      command: "onevnet create /tmp/vlan_public.tmpl"
      become_user: oneadmin
      register: vnet_create
      failed_when: vnet_create.rc == 1
    - name: create acl for hosts
      command: "oneacl create '* HOST/* MANAGE'"
      become_user: oneadmin
      register: acl_create
      failed_when: acl_create.rc == 1
    # - name: create private network template
    #   command: "onevnet create /vagrant/vnet_private.tmpl"
    #   become_user: oneadmin
    #   failed_when: host_create.rc == 1
    - name: ipv4 forward
      sysctl:
        name: net.ipv4.ip_forward
        value: 1
        sysctl_set: yes
        state: present
        reload: yes
    - name: Install bridge-utils
      apt: name=bridge-utils state=present
    # - name: Setup bridge onebr0 where eth0 connects to this bridge
    #   template: src=files/interface.onebr0 dest=/etc/network/interfaces.d/onebr0
    #   notify:
    #     - restart networking
    # - name: Install libvirt bridge
    #   copy:
    #     src: files/opennebula-bridge.xml
    #     dest: /etc/libvirt/qemu/networks/opennebula.xml
    #   notify:
    #     - libvirt network
  handlers:
    - name: restart networking
      service: name=networking state=restarted
    - name: libvirt network
      command: virsh net-create /etc/libvirt/qemu/networks/opennebula.xml
      become: true
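Once the play has finished, the resources it created can be checked from the host through the container; a sketch assuming the container name from main.tf:

# Query OpenNebula as oneadmin inside the container.
lxc exec opennebula -- su - oneadmin -c "onehost list"   # the 127.0.0.1 KVM host
lxc exec opennebula -- su - oneadmin -c "oneimage list"  # the boot2docker image
lxc exec opennebula -- su - oneadmin -c "onevnet list"   # the public VLAN network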