Commit 72d54861 authored by John Hackett's avatar John Hackett

Initial commit of opennebula workings

Current status is broken networking, hallelujah. Almost certainly
dodgy bridge config.
parents
.vagrant
sudo brctl addif onebr0 eth0
sudo ip route add 192.168.0.0/24 dev eth0
# This guide is optimized for Vagrant 1.7 and above.
# Although versions 1.6.x should behave very similarly, it is recommended
# to upgrade instead of disabling the requirement below.
Vagrant.require_version ">= 1.7.0"

Vagrant.configure(2) do |cfg|
  cfg.vm.box = "debian/jessie64"

  # Keep Vagrant's default (insecure) keypair so every machine shares the
  # same SSH key pair, instead of the per-machine key Vagrant >= 1.7 inserts.
  # See https://github.com/mitchellh/vagrant/issues/5005
  cfg.ssh.insert_key = false

  # Libvirt guest sizing; nested virtualisation is on because this box will
  # itself host OpenNebula/KVM guests.
  cfg.vm.provider :libvirt do |lv|
    lv.memory       = 4096
    lv.cpus         = 2
    lv.nested       = true
    lv.volume_cache = 'none'
  end

  # Single machine bridged onto virbr1 with a fixed MAC and a management
  # network attached.
  cfg.vm.define :default do |node|
    node.vm.network :public_network,
                    :dev => "virbr1",
                    :mode => "bridge",
                    :type => "bridge",
                    :mgmt_attach => true,
                    :management_network_address => "192.168.0.0/16",
                    :mac => "52:54:00:40:b6:14"
  end

  # Provision the box with the Ansible playbook shipped next to this file.
  cfg.vm.provision "ansible" do |ans|
    ans.verbose  = ""
    ans.playbook = "playbook.yml"
  end
end
# OpenNebula VM template for a small boot2docker guest.
NAME = "boot2docker"
# Contextualisation: enable network config and inject the owning user's
# SSH public key into the VM.
CONTEXT = [
NETWORK = "YES",
SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]" ]
CPU = "0.1"
# Boot disk by image ID — presumably the boot2docker image registered by the
# provisioning playbook; verify the ID matches on a fresh deployment.
DISK = [
IMAGE_ID = "0" ]
# VNC console listening on all interfaces.
GRAPHICS = [
LISTEN = "0.0.0.0",
TYPE = "vnc" ]
LOGO = "images/logos/linux.png"
MEMORY = "512"
provision_logo: images/opennebula-5.0.png
enabled_tabs:
- provision-tab
- settings-tab
features:
# True to show showback monthly reports, and VM cost
showback: true
# Allows to change the security groups for each network interface
# on the VM creation dialog
secgroups: true
# True to hide the CPU setting in the VM creation dialog. The CPU setting
# will be set to the same value as VCPU, that will still be visible for the
# end users
instantiate_hide_cpu: true
tabs:
provision-tab:
panel_tabs:
vm_info_tab: false
vm_capacity_tab: false
vm_storage_tab: false
vm_network_tab: false
vm_snapshot_tab: false
vm_placement_tab: false
vm_actions_tab: false
vm_conf_tab: false
vm_template_tab: false
vm_log_tab: false
# provision_tabs:
# flows: true
# templates: true
actions: &provisionactions
# In the cloud view, delete is the equivalent
# of 'onetemplate chmod --recursive'
Template.chmod: false
# In the cloud view, delete is the equivalent
# of 'onetemplate delete --recursive'
Template.delete: true
VM.rename: true
VM.resume: true
VM.reboot: true
VM.reboot_hard: true
VM.poweroff: true
VM.poweroff_hard: true
VM.undeploy: false
VM.undeploy_hard: false
VM.terminate: true
VM.terminate_hard: true
VM.resize: false
VM.attachdisk: false
VM.detachdisk: false
VM.disk_saveas: false
VM.attachnic: false
VM.detachnic: false
VM.snapshot_create: false
VM.snapshot_revert: false
VM.snapshot_delete: false
VM.disk_snapshot_create: false
VM.disk_snapshot_revert: false
VM.disk_snapshot_delete: false
VM.save_as_template: true
dashboard:
# Connected user's quotas
quotas: true
# Overview of connected user's VMs
vms: true
# Group's quotas
groupquotas: false
# Overview of group's VMs
groupvms: false
create_vm:
# True to allow capacity (CPU, MEMORY, VCPU) customization
capacity_select: true
# True to allow NIC customization
network_select: true
# True to allow DISK size customization
disk_resize: true
settings-tab:
panel_tabs:
settings_info_tab: false
settings_config_tab: true
settings_quotas_tab: true
settings_accounting_tab: true
settings_showback_tab: true
actions:
# Buttons for settings_info_tab
User.update_password: true
User.login_token: true
# Buttons for settings_config_tab
Settings.change_language: true
Settings.change_password: true
Settings.change_view: true
Settings.ssh_key: true
Settings.login_token: true
# Edit button in settings_quotas_tab
User.quotas_dialog: false
vms-tab:
actions: *provisionactions
images-tab:
table_columns:
- 0 # Checkbox
- 1 # ID
- 2 # Owner
- 3 # Group
- 4 # Name
- 5 # Datastore
#- 6 # Size
- 7 # Type
#- 8 # Registration time
#- 9 # Persistent
- 10 # Status
- 11 # #VMs
#- 12 # Target
vnets-tab:
table_columns:
- 0 # Checkbox
- 1 # ID
- 2 # Owner
#- 3 # Group
- 4 # Name
#- 5 # Reservation
#- 6 # Cluster
#- 7 # Bridge
#- 8 # Leases
#- 9 # VLAN ID
secgroups-tab:
table_columns:
- 0 # Checkbox
- 1 # ID
- 2 # Owner
#- 3 # Group
- 4 # Name
#- 5 # Labels
\ No newline at end of file
#!/bin/bash
# Download the boot2docker appliance (marketplace app 6) and register it as a
# raw OS image in datastore 1. The current epoch time is used both as the
# temporary file name and as a uniqueness suffix for the image name.
set -eu

# $(...) replaces the deprecated backtick substitution.
IMG_ID=$(date +%s)

# Fetch the marketplace app, then import it; expansions are quoted so a
# surprising $IMG_ID value cannot split the arguments.
sudo -iu oneadmin onemarketapp download 6 "/var/tmp/${IMG_ID}"
sudo -iu oneadmin oneimage create -d 1 --name "boot2docker-${IMG_ID}" --path "/var/tmp/${IMG_ID}" --prefix sd --type OS --driver raw --description "Boot2docker, OpenNebula contextualised"
# interface definition based upon default vagrant setup of 192.168.121.1/24 for its interfaces
# Static bridge onebr0 enslaving eth1, addressed inside the vagrant/libvirt
# 192.168.121.0/24 network with the libvirt host (.1) as gateway.
auto onebr0
iface onebr0 inet static
bridge_ports eth1
# NOTE(review): bridge_stp on imposes an STP listening/learning delay before
# the port forwards traffic — consider "off" for a single-bridge host; this
# may be the "dodgy bridge config" noted in the commit message. Confirm.
bridge_stp on
address 192.168.121.240
netmask 255.255.255.0
network 192.168.121.0
gateway 192.168.121.1
# This is a basic configuration file for LXDock.
# All configuration is done through this YML file that should be placed at the root of your project.
# The file define a basic LXDock project containing a single container with highlights regarding
# some other useful options.
name: ansible-opennebula
image: debian/jessie
# By default LXDock creates a single "default" container if you don't specify a "containers" option.
# But you need the "containers" option if you have more than one container.
# containers:
# - name: ansible-opennebula01
# - name: ansible-opennebula02
# - name: ansible-opennebula03
# Most of the options can be redefined for each container definition, eg. the "image" option:
# image: archlinux
# You can use the "provisioning" option to define provisioning tools that should be used to
# provision your containers. For example, you could use Ansible as follows:
provisioning:
- type: ansible
playbook: playbook.yml
# A common need is to access your project folder in your containers. To do this you can use the
# "shares" option:
# shares:
# - source: .
# dest: /myshare
#!/bin/bash
# Copy the oneadmin credentials out of the vagrant box into ../enrol/credentials,
# then echo them and the box's network interfaces for a quick visual check.
#vagrant ssh -c "sudo sh -c 'echo oneadmin:password > /var/lib/one/.one/one_auth'"
vagrant ssh -c 'sudo cat /var/lib/one/.one/one_auth' > ../enrol/credentials
vagrant ssh -c 'sudo cat /var/lib/one/.one/one_auth'
vagrant ssh -c 'ip a'
---
# Provision a single-node OpenNebula 5.4 host on Debian 8 (jessie): install
# the frontend + node packages, start the services, set up oneadmin's SSH
# trust to itself, register the local KVM hypervisor, import a boot2docker
# image and create the VM / virtual-network templates shipped in /vagrant.
- hosts: all
  become: true
  tasks:
    - name: install opennebula key
      apt_key: url=http://downloads.opennebula.org/repo/Debian/repo.key state=present

    - name: install opennebula repo
      apt_repository:
        repo: deb http://downloads.opennebula.org/repo/5.4/Debian/8 stable opennebula
        state: present

    - name: install opennebula
      apt: name={{ item }} state=installed
      with_items:
        - memcached
        - opennebula-sunstone
        - opennebula-gate
        - opennebula-flow
        - opennebula-node
        - opennebula
        - libsqlite3-dev
        - make
        - ruby-dev
        - libcurl3-dev
        - libxml2-dev
        - gcc
        - g++

    - name: install gems
      command: /usr/share/one/install_gems --yes

    # - name: copy cloudview config into place
    #   copy:
    #     src: 'files/sunstone-views/cloud.yaml'
    #     dest: '/etc/one/sunstone-views/cloud.yaml'
    #     owner: root
    #     group: root
    #     mode: 0644

    - name: start services
      systemd:
        state: started
        name: "{{ item }}"
      with_items:
        - opennebula
        - opennebula-sunstone

    # oneadmin must be able to SSH to every hypervisor (here: itself) without
    # prompts, so publish its own key and pre-seed known_hosts per alias.
    - name: ssh authorized_keys
      shell: "ssh-keygen -y -f /var/lib/one/.ssh/id_rsa > /var/lib/one/.ssh/authorized_keys"
      become_user: oneadmin

    - name: ssh keyscan (127.0.0.1)
      shell: "ssh-keyscan -H 127.0.0.1 >> /var/lib/one/.ssh/known_hosts"
      become_user: oneadmin

    - name: ssh keyscan (jessie)
      shell: "ssh-keyscan -H jessie >> /var/lib/one/.ssh/known_hosts"
      become_user: oneadmin

    - name: ssh keyscan (localhost)
      shell: "ssh-keyscan -H localhost >> /var/lib/one/.ssh/known_hosts"
      become_user: oneadmin

    - name: install libvirt
      apt: name={{ item }} state=installed
      with_items:
        - libvirt-daemon
        - libvirt-bin

    # 2633 is oned's XML-RPC port; wait for it before running one* commands.
    - name: sleep long enough for oned to start
      wait_for:
        port: 2633
        delay: 3

    # The failed_when below tolerates non-1 exit codes — presumably so reruns
    # against already-existing objects don't fail the play; confirm the one*
    # CLI exit codes. FIXED: each task now registers and checks its OWN
    # result; previously every task reused host_create, masking its failures.
    - name: add hosts
      command: "onehost create 127.0.0.1 -i kvm -v kvm"
      become_user: oneadmin
      register: host_create
      failed_when: host_create.rc == 1

    - name: pause 15 seconds for the host to settle
      pause: seconds=15

    - name: download boot2docker image
      command: "onemarketapp download 6 /var/tmp/boot2docker"
      become_user: oneadmin
      register: image_download
      failed_when: image_download.rc == 1

    # FIXED: the original passed --type OS twice.
    - name: add boot2docker image
      command: "oneimage create -d 1 --name boot2docker --path /var/tmp/boot2docker --prefix vd --type OS --driver raw --description \"boot2docker, OpenNebula contextualised\""
      become_user: oneadmin
      register: image_create
      failed_when: image_create.rc == 1

    - name: create boot2docker template
      command: "onetemplate create /vagrant/boot2docker.tmpl"
      become_user: oneadmin
      register: template_create
      failed_when: template_create.rc == 1

    - name: create public network template
      command: "onevnet create /vagrant/vnet_public.tmpl"
      become_user: oneadmin
      register: vnet_public_create
      failed_when: vnet_public_create.rc == 1

    - name: create private network template
      command: "onevnet create /vagrant/vnet_private.tmpl"
      become_user: oneadmin
      register: vnet_private_create
      failed_when: vnet_private_create.rc == 1

    - name: ipv4 forward
      sysctl:
        name: net.ipv4.ip_forward
        value: 1
        sysctl_set: yes
        state: present
        reload: yes

    - name: Install bridge-utils
      apt: name=bridge-utils state=present

    - name: Setup bridge onebr0 where eth0 connects to this bridge
      template: src=interface.onebr0 dest=/etc/network/interfaces.d/onebr0
      notify:
        - restart networking

  handlers:
    - name: restart networking
      service: name=networking state=restarted
# Connect to an OpenNebula endpoint using credentials and host taken from the
# environment, then allocate an (empty) VDC object and dump it for inspection.
require 'opennebula'
include OpenNebula

one_client = OpenNebula::Client.new(ENV['opennebula_credentials'], ENV['opennebula_oned_host'])

# Build the XML skeleton for a VDC and wrap it in a client-bound object.
template = OpenNebula::Vdc.build_xml
virtual_dc = OpenNebula::Vdc.new(template, one_client)

puts virtual_dc.inspect
#Configuration attributes (dummy driver)
# Private VXLAN-backed network (VLAN ID 2) for VM inter-communication,
# carried over eth1 and attached to bridge onebr0.
NAME = "private"
DESCRIPTION = "A private network for VM inter-communication"
VN_MAD = "vxlan"
PHYDEV = "eth1"
VLAN_ID="2"
BRIDGE = "onebr0"
CLUSTERS = [
ID = 0
]
# use/manage for owner and group; no access for others
PERMISSIONS = [
OWNER_U = "1",
OWNER_M = "1",
OWNER_A = "0",
GROUP_U = "1",
GROUP_M = "1",
GROUP_A = "0",
OTHER_U = "0",
OTHER_M = "0",
OTHER_A = "0"
]
# Context attributes
# FIXED: was "192.168.0.255", which is the /24 broadcast address, not the
# network address.
NETWORK_ADDRESS = "192.168.0.0"
NETWORK_MASK = "255.255.255.0"
DNS = "192.168.0.1"
GATEWAY = "192.168.0.1"
#Address Ranges, only these addresses will be assigned to the VMs
AR=[TYPE = "IP4", IP = "192.168.0.144", SIZE = "15"]
# (removed a stray trailing "}" — not valid OpenNebula template syntax)
#Configuration attributes (dummy driver)
# Public network for VM IP addresses, using the fw driver over eth0.
NAME = "public"
DESCRIPTION = "A public network for IP addresses"
VN_MAD = "fw"
PHYDEV = "eth0"
CLUSTERS = [
ID = 0
]
# use/manage for owner and group; no access for others
PERMISSIONS = [
OWNER_U = "1",
OWNER_M = "1",
OWNER_A = "0",
GROUP_U = "1",
GROUP_M = "1",
GROUP_A = "0",
OTHER_U = "0",
OTHER_M = "0",
OTHER_A = "0"
]
# Context attributes
# FIXED: was "192.168.0.255", which is the /24 broadcast address, not the
# network address.
NETWORK_ADDRESS = "192.168.0.0"
NETWORK_MASK = "255.255.255.0"
DNS = "192.168.0.1"
GATEWAY = "192.168.0.1"
#Address Ranges, only these addresses will be assigned to the VMs
AR=[TYPE = "IP4", IP = "192.168.0.128", SIZE = "15"]
# (removed a stray trailing "}" — not valid OpenNebula template syntax)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment