Thursday, December 12, 2013

Puppet scripts for deploying a fault-tolerant XEN over DRBD over LVM (+Heartbeat) cluster on Linux Debian 5.0.4 (lenny).
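
Each file below is listed under its path on the Puppet master. The cluster nodes pull the catalog with the lenny-era puppetd agent; a minimal sketch of a manual run, assuming the master resolves under its default name "puppet":

# dry run first, then apply
puppetd --test --noop
puppetd --test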





nodes.pp

node clusternode inherits basenode
{
# -------------------------------------------------------------
    include xen-dom0-kernel
    include xen-domu-config
# -------------------------------------------------------------
    include "lvm2"
    lvm2::kern_module { "dm_mod": ensure => present }
    lvm2::kern_module { "dm_mirror": ensure => present }
    lvm2::kern_module { "dm_snapshot": ensure => present }
# -------------------------------------------------------------
    include "drbd8"
    drbd8::kern_module { "drbd": ensure => present }
# -------------------------------------------------------------
    include "heartbeat"
# -------------------------------------------------------------
    include "xen-domu-lvm-drbd-backup"
# -------------------------------------------------------------   
   include cron
# -------------------------------------------------------------
   
}


node 'clu-servd1.chelgipromez.local' inherits clusternode
{

# --------------------------------------   
    include "interfaces"
    include "bonding"
   
    interfaces::add_lo_nic { puppet: }
    interfaces::add_static_nic { puppet: nicname => "eth4", primaryaddress => "172.16.0.101", primarynetmask => "255.255.0.0" }
   
    bonding::kern_module { "bonding": ensure => present }
    interfaces::add_bonded_nic { puppet: nicname => "bond0", primaryaddress => "10.0.0.1", primarynetmask => "255.0.0.0", physical_nics => "eth0 eth1 eth2 eth3" }
    interfaces::restart_network { puppet: }
# --------------------------------------
   
}

node 'clu-servd3.chelgipromez.local' inherits clusternode
{

# --------------------------------------   
    include "interfaces"
    include "bonding"
   
    interfaces::add_lo_nic { puppet: }
    interfaces::add_static_nic { puppet: nicname => "eth4", primaryaddress => "172.16.0.103", primarynetmask => "255.255.0.0" }
   
    bonding::kern_module { "bonding": ensure => present }
    interfaces::add_bonded_nic { puppet: nicname => "bond0", primaryaddress => "10.0.0.2", primarynetmask => "255.0.0.0", physical_nics => "eth0 eth1 eth2 eth3" }
    interfaces::restart_network { puppet: }

# --------------------------------------

}
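
Both nodes get the same layout: eth4 is the 172.16.0.0/16 interface (also used as the bridge for the DomU below), while eth0-eth3 are bonded into bond0 on the dedicated 10.0.0.0/8 link used for DRBD replication, live migration and ssh between the nodes. Once Puppet has applied the interfaces and bonding resources, the result can be checked on either node:

# all four slaves (eth0-eth3) should be listed and up
cat /proc/net/bonding/bond0
ip addr show bond0
ip addr show eth4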



/etc/puppet/modules/xen-dom0-kernel/manifests/init.pp
class xen-dom0-kernel
{

  exec { "reboot":
    command => "/sbin/reboot",
    subscribe => Package["linux-image-2.6.26-2-xen-amd64"],
    refreshonly => true
  }
 
  package { "linux-image-2.6.26-2-xen-amd64":
      ensure => "2.6.26-21"
      }
  package { "bridge-utils":
      ensure => installed
      }
 
  file { "/etc/xen/xend-config.sxp":
            owner    => root,
            group    => root,
            mode     => 0644,
            source  => "puppet:///xen-linux-system/xend-config.sxp";
      }
 
  file { "/etc/xen/config":
        ensure => "directory"
    }

}
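
The package pin plus the subscribed exec mean the node reboots itself once the Xen dom0 kernel is installed. After that reboot it is worth confirming the hypervisor is actually in charge:

uname -r    # expect 2.6.26-2-xen-amd64
xm info     # only answers when dom0 is running under the hypervisor
xm list     # Domain-0 should be present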


/etc/puppet/modules/xen-domu-config/manifests/init.pp

class xen-domu-config
{
  file { "/etc/xen/config/ideco":
            owner    => root,
            group    => root,
            mode     => 0644,
            source  => "puppet:///xen-domu-config/ideco";
      }
}


/etc/puppet/modules/xen-domu-config/files/ideco

import os, re
arch = os.uname()[4]

kernel = "/usr/lib/xen/boot/hvmloader"
builder = 'hvm'
device_model = "/usr/lib/xen/bin/qemu-dm"

memory = 256
name = "ideco"
vcpus = 1

vif = [ 'mac=00:16:3e:60:02:b4,type=ioemu,bridge=eth4','mac=00:16:3e:5a:1d:21,type=ioemu,bridge=eth4' ]

disk = [ 'phy:/dev/drbd0,xvda,w', 'file:/home/user/distrib/ideco/IdecoICS_341_115.iso,xvdc:cdrom,r' ]
#disk = [ 'phy:/dev/drbd0,xvda,w' ]

boot="dc"

acpi = 1
apic = 1
stdvga=0

sdl=0
vnc=1
vncpasswd=''
vnclisten="0.0.0.0"

on_reboot = 'restart'
on_crash = 'restart'
on_poweroff = 'destroy'
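
In normal operation this DomU is started by the heartbeat resource script further down, but for a first smoke test it can be created by hand on whichever node holds /dev/drbd0 as primary. It is an HVM guest, so the console is reached over VNC (vnc=1, vnclisten 0.0.0.0) rather than xm console:

xm create /etc/xen/config/ideco
xm list ideco
# qemu-dm grabs the first free VNC display, usually :0 for a single guest
vncviewer dom0-host:0    # any VNC client will do; the hostname is a placeholder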


/etc/puppet/modules/lvm2/manifests/init.pp


class lvm2
{
    package { "lvm2": ensure => installed }

    define kern_module ($ensure)
    {
        $modulesfile = "/etc/modules"
        case $ensure
        {
            present:
            {
                exec
                { "insert_module_${name}":
                        command => "/bin/echo '${name}' >> '${modulesfile}' && /sbin/modprobe '${name}'",
                        unless => "/bin/grep -qFx '${name}' '${modulesfile}'"
                }
            }
    
            absent:
            {
                exec { "/sbin/modprobe -r ${name}": onlyif => "/bin/grep -q '^${name} ' '/proc/modules'" }
                exec
                { "remove_module_${name}":
                    command => "/usr/bin/perl -ni -e 'print unless /^\\Q${name}\\E\$/' '${modulesfile}'"
                }
            }
    
            default: { err ( "unknown ensure value ${ensure}" ) }
        }
    }

}
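
The drbd.conf below expects a logical volume /dev/xen_vg1/ideco to exist on both nodes; the manifests shown here do not create it. A hand-run sketch, where the physical volume (/dev/sda3) and the size (20G) are placeholders:

pvcreate /dev/sda3                  # placeholder device
vgcreate xen_vg1 /dev/sda3
lvcreate -L 20G -n ideco xen_vg1    # placeholder size
lvs xen_vg1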


/etc/puppet/modules/drbd8/manifests/init.pp


class drbd8
{
    package { "drbd8-modules-2.6.26-2-xen-amd64": ensure => installed }
    package { "drbd8-utils": ensure => installed }
  
    file { "/etc/drbd.conf":
            owner    => root,
            group    => root,
            mode     => 0644,
            source  => "puppet:///drbd8/drbd.conf";
      }

    define kern_module ($ensure)
    {
        $modulesfile = "/etc/modules"
        case $ensure
        {
            present:
            {
                exec
                { "insert_module_${name}":
                        command => "/bin/echo '${name}' >> '${modulesfile}' && /sbin/modprobe '${name}'",
                        unless => "/bin/grep -qFx '${name}' '${modulesfile}'"
                }
            }
  
            absent:
            {
                exec { "/sbin/modprobe -r ${name}": onlyif => "/bin/grep -q '^${name} ' '/proc/modules'" }
                exec
                { "remove_module_${name}":
                    command => "/usr/bin/perl -ni -e 'print unless /^\\Q${name}\\E\$/' '${modulesfile}'"
                }
            }
  
            default: { err ( "unknown ensure value ${ensure}" ) }
        }
    }

}


/etc/puppet/modules/drbd8/files/drbd.conf


global
{
    usage-count yes;
}

common
{
    protocol C;
    syncer { rate 400M; }
    net
    {
        allow-two-primaries;
        after-sb-0pri discard-younger-primary;
        after-sb-1pri consensus;
        after-sb-2pri call-pri-lost-after-sb;
        cram-hmac-alg sha1;
        shared-secret "123123";
    }
    handlers
    {
        pri-on-incon-degr "echo 0 > /proc/sysrq-trigger; halt -f";
        pri-lost-after-sb "echo 0 > /proc/sysrq-trigger; halt -f";
    }
}

resource xen-drbd-ideco {
  on clu-servd1 {
    device    /dev/drbd0;
    disk    /dev/xen_vg1/ideco;
    address    10.0.0.1:7788;
    meta-disk    internal;
  }
  on clu-servd3 {
    device    /dev/drbd0;
    disk    /dev/xen_vg1/ideco;
    address    10.0.0.2:7788;
    meta-disk    internal;
  }
}
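
With the backing LV present and the drbd module loaded, the resource still needs a one-time initialisation by hand; a sketch for the DRBD 8.x utilities shipped with lenny:

# on both nodes:
drbdadm create-md xen-drbd-ideco
drbdadm up xen-drbd-ideco

# on one node only, to kick off the initial full sync:
drbdadm -- --overwrite-data-of-peer primary xen-drbd-ideco

# watch progress:
cat /proc/drbd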


/etc/puppet/modules/heartbeat/manifests/init.pp

class heartbeat
{
    package { "heartbeat": ensure => installed }
    
    file { "/etc/heartbeat/ha.cf":
            owner    => root,
            group    => root,
            mode     => 0644,
            source  => "puppet:///heartbeat/ha.cf";
      }
    
    file { "/etc/heartbeat/authkeys":
            owner    => root,
            group    => root,
            mode     => 0600,
            source  => "puppet:///heartbeat/authkeys";
      }
    
    file { "/etc/heartbeat/haresources":
            owner    => root,
            group    => root,
            mode     => 0644,
            source  => "puppet:///heartbeat/haresources";
      }
    
    file { "/etc/heartbeat/resource.d/xendom":
            owner    => root,
            group    => root,
            mode     => 0655,
            source  => "puppet:///heartbeat/xendom";
      }
    
    file { "/etc/heartbeat/resource.d/xendom2":
            owner    => root,
            group    => root,
            mode     => 0655,
            source  => "puppet:///heartbeat/xendom2";
      }
   
}
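
The ha.cf and authkeys files referenced here are not reproduced in this post. Once all of the files are in place on both nodes, heartbeat can be started and its view of the cluster checked:

/etc/init.d/heartbeat start
cl_status hbstatus      # is the local heartbeat daemon up?
cl_status listnodes     # the two cluster nodes should be listed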


/etc/puppet/modules/heartbeat/files/haresources

 clu-servd1 xendom2::ideco::xen-drbd-ideco
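
In haresources syntax this means clu-servd1 is the preferred node and heartbeat manages the resource by calling resource.d/xendom2 with ideco and xen-drbd-ideco as its first two arguments, appending start, stop or status. The same call can be made by hand to test the script outside heartbeat:

/etc/heartbeat/resource.d/xendom2 ideco xen-drbd-ideco status
/etc/heartbeat/resource.d/xendom2 ideco xen-drbd-ideco start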


/etc/puppet/modules/heartbeat/files

#!/bin/bash
set -e
SELF=10.0.0.1
PEER=10.0.0.2
if [ $(hostname) = clu-servd3 ]; then
    SELF=10.0.0.2
    PEER=10.0.0.1
fi
SSH_OPTS="-o ConnectTimeout=15"

# DomU name and DomU config filename must be identical
domu_name=$1
DRBD=$2
command=$3

CFGDIR=/etc/xen/config

function usage {
    echo "Usage: $0 DomU_name DRBD_resource start|stop|status"
    exit 1
}

function is_alive
{
    xm list $1 >/dev/null 2>&1
}

function safe_to_migrate
{
    case "$(drbdadm cstate $DRBD)" in
    Connected|SyncSource|SyncTarget)
        return 0
        ;;
    *)
        echo "$DRBD is disconnected, NOT safe to migrate"
        return 1
        ;;
    esac
}

function prepare_migration
{
    echo "Preparing for migration:"
    ssh $SSH_OPTS $PEER "drbdadm primary $DRBD";
}

function start_disk
{
    echo "Change $DRBD state to primary"
    drbdadm primary $DRBD
}

function stop_disk
{
    echo "Change $DRBD state to secondary:"
    drbdadm secondary $DRBD || true
}

function update_mac_cache
{
    # flush the stale ARP entry for the guest and re-learn it after migration
    arp -d $domu_name >/dev/null 2>&1 || true
    ping -c1 -w1 $domu_name >/dev/null 2>&1 || true
}

function start_domain
{
    start_disk
    echo -n "Starting $domu_config: "
    if is_alive $domu_name; then
        echo "already running."
    else
        if safe_to_migrate &&
                ssh $SSH_OPTS $PEER "xm migrate --live $domu_name $SELF"; then
            update_mac_cache
            echo "migrated back."
        else
            xm create -q $CFGDIR/$domu_name
            echo "created."
            sleep 2
        fi
    fi

    if safe_to_migrate; then
        ssh $SSH_OPTS $PEER "drbdadm secondary $DRBD" || true
    fi
}

function stop_domain {
    local migration
    if safe_to_migrate && prepare_migration; then
        migration="OK"
    else
        migration="NG"
    fi
        echo -n "Stopping $domu_name: "
        if ! is_alive $domu_name; then
            echo "not running."
        else
            if [ $migration = "OK" ] && xm migrate --live $domu_name $PEER; then
                update_mac_cache
                echo "migrated."
            else
                xm shutdown $domu_name
                echo "shutting down..."
            fi
        fi
    echo -n "Waiting for shutdown complete..."
        alive=0
            if is_alive $domu_name; then
                alive=1
            fi
        if [ $alive = 0 ]; then
            echo "ok"
        fi
        sleep 1
        if is_alive $domu_name; then
            echo "Destroying $domu_name"
            xm destroy $domu_name
        fi
    stop_disk
}

case $command in
start)
    start_domain
    ;;
stop)
    stop_domain
    ;;
status)
    # heartbeat checks the status output for the word "running"
    if is_alive $domu_name; then
        echo "running"
    else
        echo "stopped"
    fi
    ;;
*)
    usage
    ;;
esac
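
With both nodes converged, a failover can be exercised with the standby/takeover helpers shipped in the Debian heartbeat package; the DomU should live-migrate to the peer and back, with the DRBD resource switching primary/secondary behind it:

# on the node currently running the guest:
/usr/share/heartbeat/hb_standby

# observe on the peer:
xm list
cat /proc/drbd

# and pull it back:
/usr/share/heartbeat/hb_takeover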
 
