Commit af1c204a authored by John Hackett

* Adding prep option to grab base image and start apt-cacher

* Moving management of files into resources instead of provisioners
* Adding makefile task for test suite
* Adding in provisioning scripts
* Adding in a terraform output for IPs
parent 2925d012
destroy:
	terraform destroy -force

apply:
	terraform apply -auto-approve

build: apply ansible

ansible:
	ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i `terraform output -json | jq -r .ip.value`, playbook.yml -u root

rebuild: destroy build ansible

apt-cache:
	cat dockerfiles/apt-cacher-ng | docker build -t apt-cacher-ng -
	docker rm apt-cacher-ng || true
	docker run -d -p 3142:3142 --name apt-cacher-ng apt-cacher-ng

provision:
	mkdir -p .ruby-vendor
	cd ./scripts/ && bundle install --path ../.ruby-vendor && bundle exec ruby provision.rb

tf_output_ip = $(shell terraform output ip)

test:
	docker run -it --rm -v $(HOME)/.ssh:/keys -v $(PWD)/tests:/share chef/inspec exec -b ssh --host=$(tf_output_ip) -i /keys/id_rsa /share

image:
	virt-builder debian-9 --root-password password:super-secret-password-123 --format qcow2

prep: image apt-cache

.PHONY: destroy apply build ansible rebuild apt-cache provision test image prep
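For reference, the files/apt-proxy file that both the apt-cache workflow and the Terraform config below push into the container is a one-line apt configuration pointing at the cacher; the host address here is an assumption about where apt-cacher-ng is reachable from the container:

Acquire::http::Proxy "http://<lxd-host-ip>:3142";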
variable "ssh_public_key" {}
variable "main_nic" {}

resource "lxd_container_file" "apt_cache" {
  container_name     = "${lxd_container.opennebula.name}"
  target_file        = "/etc/apt/apt.conf.d/01proxy"
  source             = "files/apt-proxy"
  create_directories = true
}

resource "lxd_container_file" "authorized_keys" {
  container_name     = "${lxd_container.opennebula.name}"
  target_file        = "/root/.ssh/authorized_keys"
  source             = "~/.ssh/id_rsa.pub"
  create_directories = true
}

resource "lxd_container" "opennebula" {
  name  = "opennebula"
  image = "images:debian/stretch"

  profiles = ["${lxd_profile.opennebula.name}"]

  provisioner "local-exec" {
    command = "lxc exec ${lxd_container.opennebula.name} -- apt-get -y install python gnupg openssh-server avahi-daemon && lxc exec ${lxd_container.opennebula.name} -- systemctl start sshd && lxc exec ${lxd_container.opennebula.name} -- systemctl start avahi-daemon"
  }
}

output "ip" {
value = "${lxd_container.opennebula.ip_address}"
}
---
BUNDLE_PATH: "../.ruby-vendor"
BUNDLE_DISABLE_SHARED_GEMS: "true"
# frozen_string_literal: true
source "https://rubygems.org"
# gem "rails"
gem "sinatra"
gem "opennebula"
GEM
  remote: https://rubygems.org/
  specs:
    builder (3.2.3)
    json (2.1.0)
    mini_portile2 (2.3.0)
    mustermann (1.0.1)
    nokogiri (1.8.1)
      mini_portile2 (~> 2.3.0)
    opennebula (5.4.5)
      json
      nokogiri
      rbvmomi
    rack (2.0.3)
    rack-protection (2.0.0)
      rack
    rbvmomi (1.11.6)
      builder (~> 3.0)
      json (>= 1.8)
      nokogiri (~> 1.5)
      trollop (~> 2.1)
    sinatra (2.0.0)
      mustermann (~> 1.0)
      rack (~> 2.0)
      rack-protection (= 2.0.0)
      tilt (~> 2.0)
    tilt (2.0.8)
    trollop (2.1.2)

PLATFORMS
  ruby

DEPENDENCIES
  opennebula
  sinatra

BUNDLED WITH
   1.13.6
require 'opennebula'
include OpenNebula
# This script creates:
# * a group
# * a user, attached to the group
# * a vdc, owned by the group
# * a private network
#
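# Client.new expects a credentials string ("username:password", the format
# stored in ~/.one/one_auth) and the oned XML-RPC endpoint,
# e.g. "http://host:2633/RPC2".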
client = OpenNebula::Client.new(ENV['ONE_AUTH'],ENV['ONE_ONED_HOST'])
print 'What username shall we give our group and admin? '
USERNAME = gets.chomp
DEFAULT_DATASTORE = [2]
DEFAULT_PUBLIC_NETWORK = []
DEFAULT_CLUSTER = [0]
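# NOTE: these defaults are not yet referenced further down in this script.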
# User ID needs to be mapped onto another system for billing
group_options = {
  :resources          => "VM+IMAGE+TEMPLATE+DOCUMENT+SECGROUP", # implicitly allow creation
  :shared_resources   => "VM+DATASTORE+IMAGE+TEMPLATE+DOCUMENT+SECGROUP", # implicitly allow use
  :views              => [:cloud],
  :default_view       => :cloud,
  :default_admin_view => :cloud,
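  # :group_admin also creates this user and registers them as the group's admin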
  :group_admin => {
    :name     => USERNAME,
    :password => "password"
  },
  :name => USERNAME
}
group_xml = Group.build_xml
group = Group.new(group_xml, client)
# More idiomatic form of the above:
#   group = Group.new(Group.build_xml, client)
err = group.create(group_options)
if OpenNebula.is_error?(err)
  STDERR.puts "group.allocate: #{err.message}"
else
  puts "group.allocate: #{group.id.to_s}"
end
private_network = %{
  # Configuration attributes (dummy driver)
  NAME        = "rfc1918-ovs #{USERNAME}"
  DESCRIPTION = "An OVS private network for RFC1918 IP addresses"
  VN_MAD      = "ovswitch"
  BRIDGE      = "onebr0"

  CLUSTERS = [
    ID = 0
  ]

  VLAN_IDS = [
    START    = "10",
    RESERVED = "0, 1, 4095"
  ]

  PERMISSIONS = [
    OWNER_U = "1",
    OWNER_M = "1",
    OWNER_A = "0",
    GROUP_U = "1",
    GROUP_M = "1",
    GROUP_A = "0",
    OTHER_U = "0",
    OTHER_M = "0",
    OTHER_A = "0"
  ]

  # Context attributes
  NETWORK_ADDRESS = "172.16.0.0"
  NETWORK_MASK    = "255.240.0.0"
  DNS             = "192.168.0.1"
  GATEWAY         = "10.0.24.1"

  # Address Ranges; only these addresses will be assigned to VMs
  AR = [TYPE = "IP4", IP = "172.16.0.5", SIZE = "150"]
}
network_xml = VirtualNetwork.build_xml
network = VirtualNetwork.new(network_xml, client)
err = network.allocate(private_network, -1)
if OpenNebula.is_error?(err)
  STDERR.puts "network allocate fail: #{err.message}"
else
  puts "network allocated: #{network.id.to_s}"
end

puts "group admin ids: #{group.admin_ids}"

err = network.chown(group.admin_ids.first, group.id)
if OpenNebula.is_error?(err)
  STDERR.puts "chown fail: #{err.message}"
else
  puts "chown successful: #{network.id.to_s}"
end

err = network.chmod_octet("600")
if OpenNebula.is_error?(err)
  STDERR.puts "chmod fail: #{err.message}"
else
  puts "chmod successful: #{network.id.to_s}"
end
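# OpenNebula ACL rules are "<user> <resource> <rights>" strings:
# "@<gid>" addresses a group, "NET/#<id>" a single network, and USE grants
# the right to use it.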
acl = "@#{group.id} NET/##{network.id} USE"
default_acl_xml = Acl.build_xml()
default_acl = Acl.new(default_acl_xml, client)
parsed_rule = Acl.parse_rule(acl)
err = default_acl.allocate(parsed_rule[0], parsed_rule[1], parsed_rule[2])
if OpenNebula.is_error?(err)
  STDERR.puts "acl addition failed: #{err.message}"
else
  puts "successfully added acl: #{default_acl.id.to_s} #{parsed_rule}"
end
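# Also grant USE on network 0, assumed here to be the pre-existing shared network.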
acl = "@#{group.id} NET/#0 USE"
default_acl_xml = Acl.build_xml()
default_acl = Acl.new(default_acl_xml, client)
parsed_rule = Acl.parse_rule(acl)
err = default_acl.allocate(parsed_rule[0], parsed_rule[1], parsed_rule[2])
if OpenNebula.is_error?(err)
  STDERR.puts "acl addition failed: #{err.message}"
else
  puts "successfully added acl: #{default_acl.id.to_s} #{parsed_rule}"
end
vdc_specification = %{
NAME = "#{USERNAME}"
}
vdc_xml = OpenNebula::Vdc.build_xml
vdc = OpenNebula::Vdc.new(vdc_xml, client)
err = vdc.allocate(vdc_specification)
if OpenNebula.is_error?(err)
  STDERR.puts "vdc.allocate: #{err.message}"
else
  puts "vdc.allocate: #{vdc.id.to_s}"
end

err = vdc.add_group(group.id)
if OpenNebula.is_error?(err)
  STDERR.puts "vdc.add_group (#{group.id.to_s}): #{err.message}"
else
  puts "vdc.add_group (#{group.id.to_s}): #{vdc.id.to_s}"
end
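# add_host(zone_id, host_id): zone 0 is the default zone and -10 is
# OpenNebula's "all resources" wildcard ID.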
err = vdc.add_host(0, -10)
if OpenNebula.is_error?(err)
  STDERR.puts "vdc.add_host (#{group.id.to_s}): #{err.message}"
else
  puts "vdc.add_host (#{group.id.to_s}): #{vdc.id.to_s}"
end

err = vdc.add_datastore(0, -10)
if OpenNebula.is_error?(err)
  STDERR.puts "vdc.add_datastore (#{group.id.to_s}): #{err.message}"
else
  puts "vdc.add_datastore (#{group.id.to_s}): #{vdc.id.to_s}"
end
err = vdc.add_vnet(0, 0)
if OpenNebula.is_error?(err)
  STDERR.puts "vdc.add_vnet (#{group.id.to_s}): #{err.message}"
else
  puts "vdc.add_vnet (#{group.id.to_s}): #{vdc.id.to_s}"
end
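# New groups are automatically added to the built-in default VDC (ID 0);
# remove ours from it now that it has a dedicated VDC.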
default_vdc_xml = Vdc.build_xml(0)
default_vdc = Vdc.new(default_vdc_xml, client)
err = default_vdc.del_group(group.id)
if OpenNebula.is_error?(err)
  STDERR.puts "default_vdc.del_group (#{group.id.to_s}): #{err.message}"
else
  puts "default_vdc.del_group (#{group.id.to_s}): #{vdc.id.to_s}"
end
#err = default_vdc.del_vnet(0, network.id)
#if OpenNebula.is_error?(err)
# STDERR.puts "default_vdc.del_vnet (#{network.id.to_s}): #{err.message}"
#else
# puts "default_vdc.del_vnet (#{network.id.to_s}): #{vdc.id.to_s}"
#end
describe file('/etc/myapp.conf') do
it { should exist }
its('mode') { should cmp 0644 }
end
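The file check above is the stock InSpec example and refers to /etc/myapp.conf, which nothing in this repo creates; a minimal sketch of checks matching what the provisioner actually installs (the 'ssh' service name is an assumption about Debian's openssh-server unit) might look like:

# Hypothetical checks for the provisioned container; service name assumed.
describe service('ssh') do
  it { should be_installed }
  it { should be_running }
end

describe port(22) do
  it { should be_listening }
end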