
Re: [Xen-devel] kernel BUG at arch/x86/xen/mmu.c:1860!


  • To: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
  • From: Teck Choon Giam <giamteckchoon@xxxxxxxxx>
  • Date: Sat, 15 Jan 2011 03:25:55 +0800
  • Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
  • Delivery-date: Fri, 14 Jan 2011 11:26:39 -0800
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>



On Fri, Jan 14, 2011 at 11:20 PM, Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> wrote:
> On Wed, Dec 29, 2010 at 12:58:15PM +0800, Teck Choon Giam wrote:
> > Below is my latest test crash script:
>
> This test script is still valid? Or do you have a more updated one?

Below is the updated one.  Please note that this will mostly not work with a VG under CLVM, since clustered LVM doesn't support snapshots in my CentOS 5 testing... ...
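
If you need to check whether a VG is clustered before running the script, one
way (a sketch, assuming the LVM2 vgs tool is available and that the clustered
bit is the sixth character of vg_attr) is:

    # print any VG whose attribute string marks it as clustered
    vgs --noheadings -o vg_name,vg_attr | awk 'substr($2,6,1)=="c" {print $1" is clustered"}'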

----------8<----------8<----------8<----------8<----------8<----------8<----------8<----------8<----------8<----------
#!/bin/sh
#
# This script creates LVM snapshots, mounts them, umounts them and removes
# them in a specified number of loops to test whether doing so will crash the
# host server.  All LVM snapshots are assumed to be mountable, as if you were
# running a PV domU.
#
# Created by Giam Teck Choon
#

# The VG name; in this case we use the first one in vgdisplay output.
# Change the variable if you want a different VG name, or change head -n 1 to
# tail -n 1 if you prefer to use the last VG instead of the first when you
# have more than one VG
LVGroupName=`vgdisplay | grep 'VG Name' | awk '{print $3}' | head -n 1`

# Abort if no VG name was found or its /dev directory is missing
if [ -z "$LVGroupName" ] || [ ! -d "/dev/${LVGroupName}" ] ; then
    echo "Unable to detect VG Name!"
    exit 1
fi
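
# A terser alternative for picking the first VG (a sketch, assuming the
# LVM2 vgs tool is available) would be:
#     LVGroupName=`vgs --noheadings -o vg_name | head -n 1 | tr -d ' '`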

# return 1 if mounted, otherwise return 0
check_mount() {
    local checkdir=${1}
    if [ -n "$checkdir" ] ; then
        local check=`grep "$checkdir" /proc/mounts`
        if [ -n "$check" ] ; then
            return 1
        fi
    fi
    return 0
}
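
# Note: grep matches substrings, so a mount at /mnt/testlvm/foo2 would also
# match a check for /mnt/testlvm/foo.  A stricter check (a sketch, not used
# above) could compare the mount-point field of /proc/mounts exactly, exiting
# 1 when mounted to keep the same convention as check_mount:
#     awk -v d="$checkdir" '$2 == d { found=1 } END { exit found }' /proc/mounts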

# Create 5 testcrash LVs in $LVGroupName, each 5GB in size, and format
# them as ext3
do_lvm_create_testcrash() {
    local lvname=${1:-testcrash}
    local lvsize=${2:-5G}
    local count=1
    local limit=5
    while [ "$count" -le "$limit" ]
    do
        if [ ! -h "/dev/${LVGroupName}/${lvname}${count}" ] ; then
            echo "lvcreate -v -n ${lvname}${count} -L ${lvsize} ${LVGroupName} ... ... "
            lvcreate -v -n ${lvname}${count} -L ${lvsize} ${LVGroupName}
            echo "lvcreate -v -n ${lvname}${count} -L ${lvsize} ${LVGroupName} completed!"
            if [ -h "/dev/${LVGroupName}/${lvname}${count}" ] ; then
                echo "mke2fs -F -j /dev/${LVGroupName}/${lvname}${count} ... ... "
                mke2fs -F -j /dev/${LVGroupName}/${lvname}${count}
                echo "mke2fs -F -j /dev/${LVGroupName}/${lvname}${count} completed!"
            else
                echo "/dev/${LVGroupName}/${lvname}${count} not found!"
            fi
        fi
        count=`expr $count + 1`
    done
}
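
# After setup, the newly created LVs can be verified with the LVM2 lvs tool,
# for example:
#     lvs ${LVGroupName}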

do_lvm_create_remove() {
    # number of loops (default is 1)
    local loopcountlimit=${1:-1}
    # snapshot size (default is 1G)
    local snapshotsize=${2:-1G}
    # seconds to sleep between create, mount, umount and remove (default is 0,
    # which means no pause)
    local pauseinterval=${3:-0}
    # commands to execute after each pause/sleep, such as sync or anything
    # else you want to test
    local commands=${4}
    # loop counter; LVs ending in 'snapshot' or 'swap' are filtered out in the
    # loop below
    local count=0
    if [ -d "/dev/${LVGroupName}" ] ; then
        while [ "$count" -lt "$loopcountlimit" ]
        do
            count=`expr $count + 1`
            echo "${count} ... ... "
            for i in `ls /dev/${LVGroupName} | grep -Ev 'snapshot$' | grep -Ev 'swap$'`; do
                if [ -h "/dev/${LVGroupName}/${i}" ] ; then
                    echo -n "lvcreate -s -v -n ${i}-snapshot -L ${snapshotsize} /dev/${LVGroupName}/${i} ... ... "
                    lvcreate -s -v -n ${i}-snapshot -L ${snapshotsize} /dev/${LVGroupName}/${i}
                    echo "done."
                    sleep ${pauseinterval}
                    if [ -n "$commands" ] ; then
                        echo -n "${commands} ... ... "
                        $commands
                        echo "done."
                    fi
                    mkdir -p /mnt/testlvm/${i}
                    if [ -h "/dev/${LVGroupName}/${i}-snapshot" ] ; then
                        check_mount /mnt/testlvm/${i}
                        local ismount=$?
                        if [ "$ismount" -eq 0 ] ; then
                            echo -n "mount /dev/${LVGroupName}/${i}-snapshot /mnt/testlvm/${i} ... ... "
                            mount /dev/${LVGroupName}/${i}-snapshot /mnt/testlvm/${i}
                            echo "done."
                            sleep ${pauseinterval}
                            if [ -n "$commands" ] ; then
                                echo -n "${commands} ... ... "
                                $commands
                                echo "done."
                            fi
                        fi
                        check_mount /mnt/testlvm/${i}
                        local ismount2=$?
                        if [ "$ismount2" -eq 1 ] ; then
                            echo -n "umount /mnt/testlvm/${i} ... ... "
                            umount /mnt/testlvm/${i}
                            echo "done."
                            sleep ${pauseinterval}
                            if [ -n "$commands" ] ; then
                                echo -n "${commands} ... ... "
                                $commands
                                echo "done."
                            fi
                        fi
                    fi
                    rm -rf /mnt/testlvm/${i}
                    echo -n "lvremove -f /dev/${LVGroupName}/${i}-snapshot ... ... "
                    lvremove -f /dev/${LVGroupName}/${i}-snapshot
                    echo "done."
                    sleep ${pauseinterval}
                    if [ -n "$commands" ] ; then
                        echo -n "${commands} ... ... "
                        $commands
                        echo "done."
                    fi
                fi
            done
            rm -fr /mnt/testlvm
        done
    else
        echo "/dev/${LVGroupName} directory not found!"
        exit 1
    fi
}

case $1 in
    setup)    shift
        do_lvm_create_testcrash "$@"
        ;;
    loop)    shift
        do_lvm_create_remove "$@"
        ;;
    *)    cat <<HELP
Usage: $0 setup [lvname] [lvsize]
       $0 loop [loopcountlimit] [snapshotsize] [pauseinterval] [commands]
Where:
    loopcountlimit defaults to 1
    snapshotsize defaults to 1G
    pauseinterval defaults to 0
    commands defaults to none (no extra command is run)

Example to run with 100 loops without pause/sleep:
    $0 loop 100

Example to run with 100 loops with pause/sleep of 5 seconds:
    $0 loop 100 1G 5

Example to run with 100 loops with snapshot size of 2G instead of 1G:
    $0 loop 100 2G

Example to run with 50 loops, 1G snapshot size, a 5-second pause, and the
sync command run at each pause/sleep:
    $0 loop 50 1G 5 sync

Example to run your own commands:
    $0 loop 100 1G 5 "echo hi && sync"

If this is the first time you are running this script and you do not have any
LVs in your VG, run:
    $0 setup
This will create 5 testcrash LVs in your VG, each 5GB in size (the default),
and format them as ext3.

HELP
        ;;
esac

----------8<----------8<----------8<----------8<----------8<----------8<----------8<----------8<----------8<----------
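
For reference, a typical run (assuming the script is saved as testcrash.sh and
made executable) might look like:

    ./testcrash.sh setup                  # create and format the 5 testcrash LVs
    ./testcrash.sh loop 100 1G 5 sync     # 100 loops, 1G snapshots, 5s pause, sync at each step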

Thanks.

Kindest regards,
Giam Teck Choon
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 

