gitweb.fluxo.info Git - puppet-nodo.git/commitdiff
Module organization
author: Silvio Rhatto <rhatto@riseup.net>
Tue, 23 Mar 2010 01:11:47 +0000 (22:11 -0300)
committer: Silvio Rhatto <rhatto@riseup.net>
Tue, 23 Mar 2010 01:11:47 +0000 (22:11 -0300)
23 files changed:
manifests/desktop.pp [new file with mode: 0644]
manifests/init.pp
manifests/master.pp [new file with mode: 0644]
manifests/nodo.pp [new file with mode: 0644]
manifests/physical.pp [new file with mode: 0644]
manifests/proxy.pp [new file with mode: 0644]
manifests/server.pp [new file with mode: 0644]
manifests/storage.pp [new file with mode: 0644]
manifests/subsystems/database.pp [moved from manifests/database.pp with 100% similarity]
manifests/subsystems/firewall.pp [moved from manifests/firewall.pp with 100% similarity]
manifests/subsystems/firewire.pp [moved from manifests/firewire.pp with 100% similarity]
manifests/subsystems/initramfs.pp [moved from manifests/initramfs.pp with 100% similarity]
manifests/subsystems/lsb.pp [moved from manifests/lsb.pp with 100% similarity]
manifests/subsystems/motd.pp [moved from manifests/motd.pp with 100% similarity]
manifests/subsystems/munin.pp [moved from manifests/munin.pp with 100% similarity]
manifests/subsystems/sudo.pp [moved from manifests/sudo.pp with 100% similarity]
manifests/subsystems/sysctl.pp [moved from manifests/sysctl.pp with 100% similarity]
manifests/subsystems/ups.pp [moved from manifests/ups.pp with 100% similarity]
manifests/subsystems/utils.pp [moved from manifests/utils.pp with 100% similarity]
manifests/subsystems/websites.pp [moved from manifests/websites.pp with 100% similarity]
manifests/test.pp [new file with mode: 0644]
manifests/vserver.pp [new file with mode: 0644]
manifests/web.pp [new file with mode: 0644]

diff --git a/manifests/desktop.pp b/manifests/desktop.pp
new file mode 100644 (file)
index 0000000..686801b
--- /dev/null
@@ -0,0 +1,63 @@
+class nodo::desktop inherits nodo::physical {
+  include utils::desktop
+
+  # fstab
+  file { "/etc/fstab":
+    source  => "puppet://$desktop/modules/nodo/etc/fstab/desktop",
+    owner   => "root",
+    group   => "root",
+    mode    => 0644,
+    ensure  => present,
+  }
+
+  # crypttab
+  file { "/etc/crypttab":
+    source  => "puppet://$desktop/modules/nodo/etc/crypttab/desktop",
+    owner   => "root",
+    group   => "root",
+    mode    => 0644,
+    ensure  => present,
+  }
+
+  # data
+  file { "/var/data":
+    ensure => directory,
+    mode   => 0755,
+  }
+
+  # pam - login
+  file { "/etc/pam.d/login":
+    source  => "puppet://$desktop/modules/nodo/etc/pam.d/login",
+    owner   => "root",
+    group   => "root",
+    mode    => 0644,
+    ensure  => present,
+  }
+
+  # pam - gdm
+  file { "/etc/pam.d/gdm":
+    source  => "puppet://$desktop/modules/nodo/etc/pam.d/gdm",
+    owner   => "root",
+    group   => "root",
+    mode    => 0644,
+    ensure  => present,
+  }
+
+  # pam - mountpoints
+  file { "/etc/security/pam_mount.conf.xml":
+    ensure  => present,
+    owner   => root,
+    group   => root,
+    mode    => 0644,
+    source  => "puppet://$server/files/etc/security/pam_mount.conf.xml",
+  }
+
+  # xorg
+  file { "/etc/X11/xorg.conf":
+    ensure  => present,
+    owner   => root,
+    group   => root,
+    mode    => 0644,
+    source  => "puppet://$server/files/etc/X11/xorg.conf/$hostname",
+  }
+}
index fc50a5ff61934f7ba37951d433458a9608222ac6..5e597a2b8c9e155b9389bd5bfc782acf396afde5 100644 (file)
 # Nodo class definitions
 #
 
-import "firewall.pp"
-import "firewire.pp"
-import "initramfs.pp"
-import "lsb.pp"
-import "motd.pp"
-import "sudo.pp"
-import "sysctl.pp"
-import "ups.pp"
-import "utils.pp"
-import "database.pp"
-import "websites.pp"
-import "munin.pp"
-
-class nodo {
-  include lsb
-  include puppetd
-  include backup
-  include exim
-  include sudo
-  include users::admin
-  include motd
-  include utils
-  include cron
-
-  # Set timezone and ntp config
-  #
-  # We config those here but leave class inclusion elsewhere
-  # as ntp config differ from server to vserver.
-  #
-  $ntp_timezone = "Brazil/East"
-  $ntp_pool     = "south-america.pool.ntp.org"
-  $ntp_servers  = [ 'a.ntp.br', 'b.ntp.br', 'c.ntp.br' ]
-
-  # Monkeysphere
-  #
-  # Currently we don't have a defined policy regarding whether
-  # to publish all our node keys to public keyservers, so leave
-  # automatic publishing disabled for now.
-  #
-  $monkeysphere_publish_key = false
-  include monkeysphere
-
-  # Apt configuration
-  $backports_enabled = true
-  $apt_update_method = 'cron'
-  include apt
-
-  # Default SSH configuration
-  $sshd_password_authentication = "yes"
-  $sshd_shared_ip               = "yes"
-
-  file { "/etc/hostname":
-    owner   => "root",
-    group   => "root",
-    mode    => 0644,
-    ensure  => present,
-    content => "$fqdn\n",
-  }
-
-  host { "$hostname":
-    ensure => present,
-    ip     => "$ipaddress",
-    alias  => [ "$fqdn" ],
-  }
-
-  file { "/etc/rc.local":
-    source  => "puppet://$server/modules/nodo/etc/rc.local",
-    owner   => "root",
-    group   => "root",
-    mode    => 0755,
-    ensure  => present,
-  }
-
-  file { "/etc/screenrc":
-    source  => "puppet://$server/modules/nodo/etc/screenrc",
-    owner   => "root",
-    group   => "root",
-    mode    => 0644,
-    ensure  => present,
-  }
-
-  file { "/etc/profile":
-    source  => "puppet://$server/modules/nodo/etc/profile",
-    owner   => "root",
-    group   => "root",
-    mode    => 0644,
-    ensure  => present,
-    require => File['/usr/local/bin/prompt.sh'],
-  }
-
-  file { "/etc/bash.bashrc":
-    source  => "puppet://$server/modules/nodo/etc/bash.bashrc",
-    owner   => "root",
-    group   => "root",
-    mode    => 0644,
-    ensure  => present,
-    require => File['/usr/local/bin/prompt.sh'],
-  }
-
-  file { "/usr/local/bin/prompt.sh":
-    source  => "puppet://$server/modules/nodo/bin/prompt.sh",
-    owner   => "root",
-    group   => "root",
-    mode    => 0755,
-    ensure  => present,
-  }
-}
-
-class nodo::physical inherits nodo {
-  include syslog-ng
-  include firewall
-  include vserver::host
-  include initramfs
-  include firewire
-  include sysctl
-  include ups
-  include utils::physical
-  include smartmontools
-
-  # Time configuration
-  case $ntpdate {
-    false:   { include timezone }
-    default: { include ntpdate  }
-  }
-
-  # DNS resolver
-  $resolvconf_domain = "$domain"
-  $resolvconf_search = "$fqdn"
-  include resolvconf
-
-  # SSH Server
-  #
-  # We need to restrict listen address so multiple instances
-  # can live together in the same physical host.
-  #
-  case $sshd_listen_address {
-    '': { $sshd_listen_address = [ "$ipaddress" ] }
-  }
-  include sshd
-
-  backupninja::sys { "sys":
-    ensure => present,
-  }
-
-  # Munin configuration
-  munin_node { "$hostname":
-    port => '4900',
-  }
-}
-
-class nodo::server inherits nodo::physical {
-  # fstab
-  file { "/etc/fstab":
-    source  => "puppet://$server/modules/nodo/etc/fstab/server",
-    owner   => "root",
-    group   => "root",
-    mode    => 0644,
-    ensure  => present,
-  }
-
-  # crypttab
-  file { "/etc/crypttab":
-    source  => "puppet://$server/modules/nodo/etc/crypttab/server",
-    owner   => "root",
-    group   => "root",
-    mode    => 0644,
-    ensure  => present,
-  }
-}
-
-class nodo::desktop inherits nodo::physical {
-  include utils::desktop
-
-  # fstab
-  file { "/etc/fstab":
-    source  => "puppet://$desktop/modules/nodo/etc/fstab/desktop",
-    owner   => "root",
-    group   => "root",
-    mode    => 0644,
-    ensure  => present,
-  }
-
-  # crypttab
-  file { "/etc/crypttab":
-    source  => "puppet://$desktop/modules/nodo/etc/crypttab/desktop",
-    owner   => "root",
-    group   => "root",
-    mode    => 0644,
-    ensure  => present,
-  }
-
-  # data
-  file { "/var/data":
-    ensure => directory,
-    mode   => 0755,
-  }
-
-  # pam - login
-  file { "/etc/pam.d/login":
-    source  => "puppet://$desktop/modules/nodo/etc/pam.d/login",
-    owner   => "root",
-    group   => "root",
-    mode    => 0644,
-    ensure  => present,
-  }
-
-  # pam - gdm
-  file { "/etc/pam.d/gdm":
-    source  => "puppet://$desktop/modules/nodo/etc/pam.d/gdm",
-    owner   => "root",
-    group   => "root",
-    mode    => 0644,
-    ensure  => present,
-  }
-
-  # pam - mountpoints
-  file { "/etc/security/pam_mount.conf.xml":
-    ensure  => present,
-    owner   => root,
-    group   => root,
-    mode    => 0644,
-    source  => "puppet://$server/files/etc/security/pam_mount.conf.xml",
-  }
-
-  # xorg
-  file { "/etc/X11/xorg.conf":
-    ensure  => present,
-    owner   => root,
-    group   => root,
-    mode    => 0644,
-    source  => "puppet://$server/files/etc/X11/xorg.conf/$hostname",
-  }
-}
-
-class nodo::vserver inherits nodo {
-  include sshd
-  include timezone
-  include syslog-ng::vserver
-
-  backupninja::sys { "sys":
-    ensure     => present,
-    partitions => false,
-    hardware   => false,
-    dosfdisk   => false,
-    dohwinfo   => false,
-  }
-
-  $hosting_type = $node_hosting_type ? {
-    ''      => "direct",
-    default => "$node_hosting_type",
-  }
-
-  case $hosting_type {
-    "direct": {
-      # Apply munin configuration for this node for
-      # directly hosted nodes.
-      Munin_node <<| title == $hostname |>>
-    }
-    "third-party": {
-      # Apply munin configuration for this node for third-party
-      # hosted nodes.
-      munin_node { "$hostname": }
-    }
-  }
-
-  # Define a vserver instance
-  define instance($context, $ensure = 'running', $proxy = false,
-                  $puppetmaster = false, $gitd = false,
-                  $icecast = false, $sound = false, $ticket = false,
-                  $memory_limit = false) {
-
-    # set instance id
-    if $context < 9 {
-      $id = "0$context"
-    } else {
-      $id = $context
-    }
-
-    vserver { $name:
-      ensure       => $ensure,
-      context      => "$context",
-      mark         => 'default',
-      distro       => 'lenny',
-      interface    => "eth0:192.168.0.$context/24",
-      hostname     => "$name.$domain",
-      memory_limit => $memory_limit,
-    }
-
-    # Some nodes need a lot of space at /tmp otherwise some admin
-    # tasks like backups might not run.
-    file { "/etc/vservers/${name}/fstab":
-      source  => "puppet://$server/modules/nodo/etc/fstab/vserver",
-      owner   => "root",
-      group   => "root",
-      mode    => 0644,
-      ensure  => present,
-      notify  => Exec["vs_restart_${name}"],
-      require => Exec["vs_create_${name}"],
-    }
-
-    # Create a munin virtual resource to be realized in the node
-    @@munin_node { "$name":
-      port => "49$id",
-    }
-
-    # Sound support
-    if $sound {
-      if !defined(File["/usr/local/sbin/create-sound-devices"]) {
-        file { "/usr/local/sbin/create-sound-devices":
-          ensure => present,
-          source => "puppet://$server/modules/nodo/sound/devices.sh",
-          owner  => root,
-          group  => root,
-          mode   => 755,
-        }
-      }
-      exec { "/usr/local/sbin/create-sound-devices ${name}":
-        unless  => "/usr/local/sbin/create-sound-devices ${name} --check",
-        user    => root,
-        require => [ Exec["vs_create_${name}"], File["/usr/local/sbin/create-sound-devices"] ],
-      }
-    }
-
-    # Apply firewall rules just for running vservers
-    case $ensure {
-      'running': {
-
-        shorewall::rule { "ssh-$context-1":
-          action          => 'DNAT',
-          source          => 'net',
-          destination     => "vm:192.168.0.$context:22",
-          proto           => 'tcp',
-          destinationport => "22$id",
-          ratelimit       => '-',
-          order           => "2$id",
-        }
-
-        shorewall::rule { "ssh-$context-2":
-          action          => 'DNAT',
-          source          => '$FW',
-          destination     => "fw:192.168.0.$context:22",
-          proto           => 'tcp',
-          destinationport => "22$id",
-          originaldest    => "$ipaddress",
-          ratelimit       => '-',
-          order           => "3$id",
-        }
-
-        shorewall::rule { "munin-$context-1":
-          action          => 'DNAT',
-          source          => 'net',
-          destination     => "fw:192.168.0.$context:49$id",
-          proto           => 'tcp',
-          destinationport => "49$id",
-          ratelimit       => '-',
-          order           => "4$id",
-        }
-
-        shorewall::rule { "munin-$context-2":
-          action          => 'DNAT',
-          source          => '$FW',
-          destination     => "fw:192.168.0.$context:49$id",
-          proto           => 'tcp',
-          destinationport => "49$id",
-          originaldest    => "$ipaddress",
-          ratelimit       => '-',
-          order           => "5$id",
-        }
-
-        if $proxy {
-          shorewall::rule { 'http-route-1':
-            action          => 'DNAT',
-            source          => 'net',
-            destination     => "vm:192.168.0.$context:80",
-            proto           => 'tcp',
-            destinationport => '80',
-            ratelimit       => '-',
-            order           => '600',
-          }
-
-          shorewall::rule { 'http-route-2':
-            action          => 'DNAT',
-            source          => '$FW',
-            destination     => "fw:192.168.0.$context:80",
-            proto           => 'tcp',
-            destinationport => '80',
-            originaldest    => "$ipaddress",
-            ratelimit       => '-',
-            order           => '601',
-          }
-
-          shorewall::rule { 'https-route-1':
-            action          => 'DNAT',
-            source          => 'net',
-            destination     => "vm:192.168.0.$context:443",
-            proto           => 'tcp',
-            destinationport => '443',
-            ratelimit       => '-',
-            order           => '602',
-          }
-
-          shorewall::rule { 'https-route-2':
-            action          => 'DNAT',
-            source          => '$FW',
-            destination     => "fw:192.168.0.$context:443",
-            proto           => 'tcp',
-            destinationport => '443',
-            originaldest    => "$ipaddress",
-            ratelimit       => '-',
-            order           => '602',
-          }
-        }
-
-        if $puppetmaster {
-          shorewall::rule { 'puppetmaster-1':
-            action          => 'DNAT',
-            source          => 'net',
-            destination     => "fw:192.168.0.$context:8140",
-            proto           => 'tcp',
-            destinationport => '8140',
-            ratelimit       => '-',
-            order           => '700',
-          }
-
-          shorewall::rule { 'puppetmaster-2':
-            action          => 'DNAT',
-            source          => 'net',
-            destination     => "fw:192.168.0.$context:8140",
-            proto           => 'udp',
-            destinationport => '8140',
-            ratelimit       => '-',
-            order           => '701',
-          }
-
-          shorewall::rule { 'puppetmaster-3':
-            action          => 'DNAT',
-            source          => '$FW',
-            destination     => "fw:192.168.0.$context:8140",
-            proto           => 'tcp',
-            destinationport => '8140',
-            originaldest    => "$ipaddress",
-            ratelimit       => '-',
-            order           => '702',
-          }
-
-          shorewall::rule { 'puppetmaster-4':
-            action          => 'DNAT',
-            source          => '$FW',
-            destination     => "fw:192.168.0.$context:8140",
-            proto           => 'udp',
-            destinationport => '8140',
-            originaldest    => "$ipaddress",
-            ratelimit       => '-',
-            order           => '703',
-          }
-
-          shorewall::rule { 'puppetmaster-5':
-            action          => 'DNAT',
-            source          => 'net',
-            destination     => "fw:192.168.0.$context:8141",
-            proto           => 'tcp',
-            destinationport => '8141',
-            ratelimit       => '-',
-            order           => '704',
-          }
-
-          shorewall::rule { 'puppetmaster-6':
-            action          => 'DNAT',
-            source          => 'net',
-            destination     => "fw:192.168.0.$context:8141",
-            proto           => 'udp',
-            destinationport => '8141',
-            ratelimit       => '-',
-            order           => '705',
-          }
-
-          shorewall::rule { 'puppetmaster-7':
-            action          => 'DNAT',
-            source          => '$FW',
-            destination     => "fw:192.168.0.$context:8141",
-            proto           => 'tcp',
-            destinationport => '8141',
-            originaldest    => "$ipaddress",
-            ratelimit       => '-',
-            order           => '706',
-          }
-
-          shorewall::rule { 'puppetmaster-8':
-            action          => 'DNAT',
-            source          => '$FW',
-            destination     => "fw:192.168.0.$context:8141",
-            proto           => 'udp',
-            destinationport => '8141',
-            originaldest    => "$ipaddress",
-            ratelimit       => '-',
-            order           => '707',
-          }
-        }
-
-        if $gitd {
-          shorewall::rule { 'git-daemon-1':
-            action          => 'DNAT',
-            source          => 'net',
-            destination     => "fw:192.168.0.$context:9418",
-            proto           => 'tcp',
-            destinationport => '9418',
-            ratelimit       => '-',
-            order           => '800',
-          }
-
-          shorewall::rule { 'git-daemon-2':
-            action          => 'DNAT',
-            source          => '$FW',
-            destination     => "fw:192.168.0.$context:9418",
-            proto           => 'tcp',
-            destinationport => '9418',
-            originaldest    => "$ipaddress",
-            ratelimit       => '-',
-            order           => '801',
-          }
-        }
-
-        if $icecast {
-          shorewall::rule { 'icecast-1':
-            action          => 'DNAT',
-            source          => 'net',
-            destination     => "fw:192.168.0.$context:8000",
-            proto           => 'tcp',
-            destinationport => '8000',
-            ratelimit       => '-',
-            order           => '900',
-          }
-
-          shorewall::rule { 'icecast-2':
-            action          => 'DNAT',
-            source          => '$FW',
-            destination     => "fw:192.168.0.$context:8000",
-            proto           => 'tcp',
-            destinationport => '8000',
-            originaldest    => "$ipaddress",
-            ratelimit       => '-',
-            order           => '901',
-          }
-        }
-      }
-    }
-  }
-}
-
-class nodo::web inherits nodo::vserver {
-  include git-daemon
-  include websites
-  include database
-  include users::virtual
-  include utils::web
-
-  backupninja::svn { "svn":
-    src => "/var/svn",
-  }
-
-  backupninja::mysql { "all_databases":
-       backupdir => '/var/backups/mysql',
-       compress  => true,
-       sqldump   => true,
-  }
-}
-
-class nodo::master {
-  # Puppetmaster should be included before nodo::vserver
-  include puppetmasterd
-  include nodo::vserver
-  include database
-  include gitosis
-  include websites::admin
-
-  case $main_master {
-    '': { fail("You need to define if this is the main master! Please set \$main_master in host config") }
-  }
-
-  if $main_master == true {
-    include munin::host
-
-    # The main master has a host entry pointing to itself, other
-    # masters still retrieve catalogs from the main master.
-    host { "puppet":
-      ensure => present,
-      ip     => "127.0.0.1",
-      alias  => ["puppet.$domain"],
-    }
-  } else {
-    host { "puppet":
-      ensure => absent,
-    }
-  }
-
-  case $puppetmaster_db_password {
-    '': { fail("Please set \$puppetmaster_db_password in your host config") }
-  }
-
-  # update master's puppet.conf if you change here
-  database::instance { "puppet":
-    password => "$puppetmaster_db_password",
-  }
-
-  backupninja::mysql { "all_databases":
-       backupdir => '/var/backups/mysql',
-       compress  => true,
-       sqldump   => true,
-  }
-
-  # used for trac dependency graphs
-  package { "graphviz":
-    ensure => present,
-  }
-}
-
-class nodo::proxy inherits nodo::vserver {
-  include nginx
-}
-
-class nodo::storage inherits nodo::vserver {
-  # Class for backup nodes
-  include utils::storage
-}
-
-class nodo::test inherits nodo::web {
-  # Class for test nodes
-}
+# Import subsystems
+import "subsystems/firewall.pp"
+import "subsystems/firewire.pp"
+import "subsystems/initramfs.pp"
+import "subsystems/lsb.pp"
+import "subsystems/motd.pp"
+import "subsystems/sudo.pp"
+import "subsystems/sysctl.pp"
+import "subsystems/ups.pp"
+import "subsystems/utils.pp"
+import "subsystems/database.pp"
+import "subsystems/websites.pp"
+import "subsystems/munin.pp"
+
+# Import nodo classes
+import "nodo.pp"
+import "physical.pp"
+import "server.pp"
+import "desktop.pp"
+import "vserver.pp"
+import "web.pp"
+import "master.pp"
+import "proxy.pp"
+import "storage.pp"
+import "test.pp"
diff --git a/manifests/master.pp b/manifests/master.pp
new file mode 100644 (file)
index 0000000..b07866e
--- /dev/null
@@ -0,0 +1,48 @@
+class nodo::master {
+  # Puppetmaster should be included before nodo::vserver
+  include puppetmasterd
+  include nodo::vserver
+  include database
+  include gitosis
+  include websites::admin
+
+  case $main_master {
+    '': { fail("You need to define if this is the main master! Please set \$main_master in host config") }
+  }
+
+  if $main_master == true {
+    include munin::host
+
+    # The main master has a host entry pointing to itself, other
+    # masters still retrieve catalogs from the main master.
+    host { "puppet":
+      ensure => present,
+      ip     => "127.0.0.1",
+      alias  => ["puppet.$domain"],
+    }
+  } else {
+    host { "puppet":
+      ensure => absent,
+    }
+  }
+
+  case $puppetmaster_db_password {
+    '': { fail("Please set \$puppetmaster_db_password in your host config") }
+  }
+
+  # update master's puppet.conf if you change here
+  database::instance { "puppet":
+    password => "$puppetmaster_db_password",
+  }
+
+  backupninja::mysql { "all_databases":
+       backupdir => '/var/backups/mysql',
+       compress  => true,
+       sqldump   => true,
+  }
+
+  # used for trac dependency graphs
+  package { "graphviz":
+    ensure => present,
+  }
+}
diff --git a/manifests/nodo.pp b/manifests/nodo.pp
new file mode 100644 (file)
index 0000000..5e5436e
--- /dev/null
@@ -0,0 +1,94 @@
+class nodo {
+  include lsb
+  include puppetd
+  include backup
+  include exim
+  include sudo
+  include users::admin
+  include motd
+  include utils
+  include cron
+
+  # Set timezone and ntp config
+  #
+  # We config those here but leave class inclusion elsewhere
+  # as ntp config differ from server to vserver.
+  #
+  $ntp_timezone = "Brazil/East"
+  $ntp_pool     = "south-america.pool.ntp.org"
+  $ntp_servers  = [ 'a.ntp.br', 'b.ntp.br', 'c.ntp.br' ]
+
+  # Monkeysphere
+  #
+  # Currently we don't have a defined policy regarding whether
+  # to publish all our node keys to public keyservers, so leave
+  # automatic publishing disabled for now.
+  #
+  $monkeysphere_publish_key = false
+  include monkeysphere
+
+  # Apt configuration
+  $backports_enabled = true
+  $apt_update_method = 'cron'
+  include apt
+
+  # Default SSH configuration
+  $sshd_password_authentication = "yes"
+  $sshd_shared_ip               = "yes"
+
+  file { "/etc/hostname":
+    owner   => "root",
+    group   => "root",
+    mode    => 0644,
+    ensure  => present,
+    content => "$fqdn\n",
+  }
+
+  host { "$hostname":
+    ensure => present,
+    ip     => "$ipaddress",
+    alias  => [ "$fqdn" ],
+  }
+
+  file { "/etc/rc.local":
+    source  => "puppet://$server/modules/nodo/etc/rc.local",
+    owner   => "root",
+    group   => "root",
+    mode    => 0755,
+    ensure  => present,
+  }
+
+  file { "/etc/screenrc":
+    source  => "puppet://$server/modules/nodo/etc/screenrc",
+    owner   => "root",
+    group   => "root",
+    mode    => 0644,
+    ensure  => present,
+  }
+
+  file { "/etc/profile":
+    source  => "puppet://$server/modules/nodo/etc/profile",
+    owner   => "root",
+    group   => "root",
+    mode    => 0644,
+    ensure  => present,
+    require => File['/usr/local/bin/prompt.sh'],
+  }
+
+  file { "/etc/bash.bashrc":
+    source  => "puppet://$server/modules/nodo/etc/bash.bashrc",
+    owner   => "root",
+    group   => "root",
+    mode    => 0644,
+    ensure  => present,
+    require => File['/usr/local/bin/prompt.sh'],
+  }
+
+  file { "/usr/local/bin/prompt.sh":
+    source  => "puppet://$server/modules/nodo/bin/prompt.sh",
+    owner   => "root",
+    group   => "root",
+    mode    => 0755,
+    ensure  => present,
+  }
+}
diff --git a/manifests/physical.pp b/manifests/physical.pp
new file mode 100644 (file)
index 0000000..d1ade0c
--- /dev/null
@@ -0,0 +1,41 @@
+class nodo::physical inherits nodo {
+  include syslog-ng
+  include firewall
+  include vserver::host
+  include initramfs
+  include firewire
+  include sysctl
+  include ups
+  include utils::physical
+  include smartmontools
+
+  # Time configuration
+  case $ntpdate {
+    false:   { include timezone }
+    default: { include ntpdate  }
+  }
+
+  # DNS resolver
+  $resolvconf_domain = "$domain"
+  $resolvconf_search = "$fqdn"
+  include resolvconf
+
+  # SSH Server
+  #
+  # We need to restrict listen address so multiple instances
+  # can live together in the same physical host.
+  #
+  case $sshd_listen_address {
+    '': { $sshd_listen_address = [ "$ipaddress" ] }
+  }
+  include sshd
+
+  backupninja::sys { "sys":
+    ensure => present,
+  }
+
+  # Munin configuration
+  munin_node { "$hostname":
+    port => '4900',
+  }
+}
diff --git a/manifests/proxy.pp b/manifests/proxy.pp
new file mode 100644 (file)
index 0000000..51dac33
--- /dev/null
@@ -0,0 +1,3 @@
+class nodo::proxy inherits nodo::vserver {
+  include nginx
+}
diff --git a/manifests/server.pp b/manifests/server.pp
new file mode 100644 (file)
index 0000000..2300889
--- /dev/null
@@ -0,0 +1,19 @@
+class nodo::server inherits nodo::physical {
+  # fstab
+  file { "/etc/fstab":
+    source  => "puppet://$server/modules/nodo/etc/fstab/server",
+    owner   => "root",
+    group   => "root",
+    mode    => 0644,
+    ensure  => present,
+  }
+
+  # crypttab
+  file { "/etc/crypttab":
+    source  => "puppet://$server/modules/nodo/etc/crypttab/server",
+    owner   => "root",
+    group   => "root",
+    mode    => 0644,
+    ensure  => present,
+  }
+}
diff --git a/manifests/storage.pp b/manifests/storage.pp
new file mode 100644 (file)
index 0000000..5bb7e72
--- /dev/null
@@ -0,0 +1,4 @@
+class nodo::storage inherits nodo::vserver {
+  # Class for backup nodes
+  include utils::storage
+}
similarity index 100%
rename from manifests/lsb.pp
rename to manifests/subsystems/lsb.pp
similarity index 100%
rename from manifests/ups.pp
rename to manifests/subsystems/ups.pp
diff --git a/manifests/test.pp b/manifests/test.pp
new file mode 100644 (file)
index 0000000..7195fc2
--- /dev/null
@@ -0,0 +1,3 @@
+class nodo::test inherits nodo::web {
+  # Class for test nodes
+}
diff --git a/manifests/vserver.pp b/manifests/vserver.pp
new file mode 100644 (file)
index 0000000..14b1e28
--- /dev/null
@@ -0,0 +1,314 @@
+class nodo::vserver inherits nodo {
+  include sshd
+  include timezone
+  include syslog-ng::vserver
+
+  backupninja::sys { "sys":
+    ensure     => present,
+    partitions => false,
+    hardware   => false,
+    dosfdisk   => false,
+    dohwinfo   => false,
+  }
+
+  $hosting_type = $node_hosting_type ? {
+    ''      => "direct",
+    default => "$node_hosting_type",
+  }
+
+  case $hosting_type {
+    "direct": {
+      # Apply munin configuration for this node for
+      # directly hosted nodes.
+      Munin_node <<| title == $hostname |>>
+    }
+    "third-party": {
+      # Apply munin configuration for this node for third-party
+      # hosted nodes.
+      munin_node { "$hostname": }
+    }
+  }
+
+  # Define a vserver instance
+  define instance($context, $ensure = 'running', $proxy = false,
+                  $puppetmaster = false, $gitd = false,
+                  $icecast = false, $sound = false, $ticket = false,
+                  $memory_limit = false) {
+
+    # Zero-pad the context to two digits: ports built as "22$id"/"49$id" need it
+    if $context < 10 {
+      $id = "0$context"
+    } else {
+      $id = $context
+    }
+
+    vserver { $name:
+      ensure       => $ensure,
+      context      => "$context",
+      mark         => 'default',
+      distro       => 'lenny',
+      interface    => "eth0:192.168.0.$context/24",
+      hostname     => "$name.$domain",
+      memory_limit => $memory_limit,
+    }
+
+    # Some nodes need a lot of space at /tmp otherwise some admin
+    # tasks like backups might not run.
+    file { "/etc/vservers/${name}/fstab":
+      source  => "puppet://$server/modules/nodo/etc/fstab/vserver",
+      owner   => "root",
+      group   => "root",
+      mode    => 0644,
+      ensure  => present,
+      notify  => Exec["vs_restart_${name}"],
+      require => Exec["vs_create_${name}"],
+    }
+
+    # Create a munin virtual resource to be realized in the node
+    @@munin_node { "$name":
+      port => "49$id",
+    }
+
+    # Sound support
+    if $sound {
+      if !defined(File["/usr/local/sbin/create-sound-devices"]) {
+        file { "/usr/local/sbin/create-sound-devices":
+          ensure => present,
+          source => "puppet://$server/modules/nodo/sound/devices.sh",
+          owner  => root,
+          group  => root,
+          mode   => 755,
+        }
+      }
+      exec { "/usr/local/sbin/create-sound-devices ${name}":
+        unless  => "/usr/local/sbin/create-sound-devices ${name} --check",
+        user    => root,
+        require => [ Exec["vs_create_${name}"], File["/usr/local/sbin/create-sound-devices"] ],
+      }
+    }
+
+    # Apply firewall rules just for running vservers
+    case $ensure {
+      'running': {
+
+        shorewall::rule { "ssh-$context-1":
+          action          => 'DNAT',
+          source          => 'net',
+          destination     => "vm:192.168.0.$context:22",
+          proto           => 'tcp',
+          destinationport => "22$id",
+          ratelimit       => '-',
+          order           => "2$id",
+        }
+
+        shorewall::rule { "ssh-$context-2":
+          action          => 'DNAT',
+          source          => '$FW',
+          destination     => "fw:192.168.0.$context:22",
+          proto           => 'tcp',
+          destinationport => "22$id",
+          originaldest    => "$ipaddress",
+          ratelimit       => '-',
+          order           => "3$id",
+        }
+
+        shorewall::rule { "munin-$context-1":
+          action          => 'DNAT',
+          source          => 'net',
+          destination     => "fw:192.168.0.$context:49$id",
+          proto           => 'tcp',
+          destinationport => "49$id",
+          ratelimit       => '-',
+          order           => "4$id",
+        }
+
+        shorewall::rule { "munin-$context-2":
+          action          => 'DNAT',
+          source          => '$FW',
+          destination     => "fw:192.168.0.$context:49$id",
+          proto           => 'tcp',
+          destinationport => "49$id",
+          originaldest    => "$ipaddress",
+          ratelimit       => '-',
+          order           => "5$id",
+        }
+
+        if $proxy {
+          shorewall::rule { 'http-route-1':
+            action          => 'DNAT',
+            source          => 'net',
+            destination     => "vm:192.168.0.$context:80",
+            proto           => 'tcp',
+            destinationport => '80',
+            ratelimit       => '-',
+            order           => '600',
+          }
+
+          shorewall::rule { 'http-route-2':
+            action          => 'DNAT',
+            source          => '$FW',
+            destination     => "fw:192.168.0.$context:80",
+            proto           => 'tcp',
+            destinationport => '80',
+            originaldest    => "$ipaddress",
+            ratelimit       => '-',
+            order           => '601',
+          }
+
+          shorewall::rule { 'https-route-1':
+            action          => 'DNAT',
+            source          => 'net',
+            destination     => "vm:192.168.0.$context:443",
+            proto           => 'tcp',
+            destinationport => '443',
+            ratelimit       => '-',
+            order           => '602',
+          }
+
+          shorewall::rule { 'https-route-2':
+            action          => 'DNAT',
+            source          => '$FW',
+            destination     => "fw:192.168.0.$context:443",
+            proto           => 'tcp',
+            destinationport => '443',
+            originaldest    => "$ipaddress",
+            ratelimit       => '-',
+            order           => '603', # was '602', which collided with https-route-1
+          }
+        }
+
+        if $puppetmaster {
+          shorewall::rule { 'puppetmaster-1':
+            action          => 'DNAT',
+            source          => 'net',
+            destination     => "fw:192.168.0.$context:8140",
+            proto           => 'tcp',
+            destinationport => '8140',
+            ratelimit       => '-',
+            order           => '700',
+          }
+
+          shorewall::rule { 'puppetmaster-2':
+            action          => 'DNAT',
+            source          => 'net',
+            destination     => "fw:192.168.0.$context:8140",
+            proto           => 'udp',
+            destinationport => '8140',
+            ratelimit       => '-',
+            order           => '701',
+          }
+
+          shorewall::rule { 'puppetmaster-3':
+            action          => 'DNAT',
+            source          => '$FW',
+            destination     => "fw:192.168.0.$context:8140",
+            proto           => 'tcp',
+            destinationport => '8140',
+            originaldest    => "$ipaddress",
+            ratelimit       => '-',
+            order           => '702',
+          }
+
+          shorewall::rule { 'puppetmaster-4':
+            action          => 'DNAT',
+            source          => '$FW',
+            destination     => "fw:192.168.0.$context:8140",
+            proto           => 'udp',
+            destinationport => '8140',
+            originaldest    => "$ipaddress",
+            ratelimit       => '-',
+            order           => '703',
+          }
+
+          shorewall::rule { 'puppetmaster-5':
+            action          => 'DNAT',
+            source          => 'net',
+            destination     => "fw:192.168.0.$context:8141",
+            proto           => 'tcp',
+            destinationport => '8141',
+            ratelimit       => '-',
+            order           => '704',
+          }
+
+          shorewall::rule { 'puppetmaster-6':
+            action          => 'DNAT',
+            source          => 'net',
+            destination     => "fw:192.168.0.$context:8141",
+            proto           => 'udp',
+            destinationport => '8141',
+            ratelimit       => '-',
+            order           => '705',
+          }
+
+          shorewall::rule { 'puppetmaster-7':
+            action          => 'DNAT',
+            source          => '$FW',
+            destination     => "fw:192.168.0.$context:8141",
+            proto           => 'tcp',
+            destinationport => '8141',
+            originaldest    => "$ipaddress",
+            ratelimit       => '-',
+            order           => '706',
+          }
+
+          shorewall::rule { 'puppetmaster-8':
+            action          => 'DNAT',
+            source          => '$FW',
+            destination     => "fw:192.168.0.$context:8141",
+            proto           => 'udp',
+            destinationport => '8141',
+            originaldest    => "$ipaddress",
+            ratelimit       => '-',
+            order           => '707',
+          }
+        }
+
+        if $gitd {
+          shorewall::rule { 'git-daemon-1':
+            action          => 'DNAT',
+            source          => 'net',
+            destination     => "fw:192.168.0.$context:9418",
+            proto           => 'tcp',
+            destinationport => '9418',
+            ratelimit       => '-',
+            order           => '800',
+          }
+
+          shorewall::rule { 'git-daemon-2':
+            action          => 'DNAT',
+            source          => '$FW',
+            destination     => "fw:192.168.0.$context:9418",
+            proto           => 'tcp',
+            destinationport => '9418',
+            originaldest    => "$ipaddress",
+            ratelimit       => '-',
+            order           => '801',
+          }
+        }
+
+        if $icecast {
+          shorewall::rule { 'icecast-1':
+            action          => 'DNAT',
+            source          => 'net',
+            destination     => "fw:192.168.0.$context:8000",
+            proto           => 'tcp',
+            destinationport => '8000',
+            ratelimit       => '-',
+            order           => '900',
+          }
+
+          shorewall::rule { 'icecast-2':
+            action          => 'DNAT',
+            source          => '$FW',
+            destination     => "fw:192.168.0.$context:8000",
+            proto           => 'tcp',
+            destinationport => '8000',
+            originaldest    => "$ipaddress",
+            ratelimit       => '-',
+            order           => '901',
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/manifests/web.pp b/manifests/web.pp
new file mode 100644 (file)
index 0000000..09aec4d
--- /dev/null
@@ -0,0 +1,17 @@
+class nodo::web inherits nodo::vserver {
+  include git-daemon
+  include websites
+  include database
+  include users::virtual
+  include utils::web
+
+  backupninja::svn { "svn":
+    src => "/var/svn",
+  }
+
+  backupninja::mysql { "all_databases": # dump all databases via backupninja
+    backupdir => '/var/backups/mysql',
+    compress  => true,
+    sqldump   => true,
+  }
+}