russel053/ 11 月 17, 2017/ 執行檔

Update.sh

#!/bin/bash
# Update.sh — daily maintenance: snapshot the root dataset (for rollback),
# update Debian packages, refresh PVE appliance templates, update netdata.

TODAY=$(date +"%Y%m%d")

# Snapshot the root filesystem before upgrading so a bad upgrade can be
# rolled back.
# NOTE(review): the published page mangled the snapshot name (email-
# obfuscation artifact); "pve-1" is the standard Proxmox root dataset —
# confirm against `zfs list` before use.
zfs snapshot "rpool/ROOT/pve-1@$TODAY"

apt update
apt upgrade -y
# BUG FIX: the original line `apt-get autoremove (選填-y)` was not valid
# shell — the parenthetical meant "-y is optional". Kept non-interactive
# to match the rest of the script.
apt-get autoremove -y
pveam update

# Run the in-tree netdata updater; bail out instead of running the updater
# from the wrong directory if the checkout is missing.
cd netdata || exit 1
./netdata-updater.sh

Smb.sh

# Smb.sh — mount the SMB/CIFS share //10.0.1.50/data at /data.
# SECURITY NOTE(review): username/password are hardcoded on the command
# line and therefore visible in `ps` output and shell history; prefer
# `-o credentials=/root/.smbcredentials` with a mode-600 file.
mount -t cifs -o username="root",password="Password" //10.0.1.50/data /data

Scrub.sh

# Scrub.sh — start scrubs of both pools; backgrounding keeps the two
# invocations from serializing on each other.
sudo zpool scrub RAID &
sudo zpool scrub RAID2 &

iommu_status.sh

# iommu_status.sh — dump the state needed to verify IOMMU / PCI-passthrough
# setup: kernel boot flags, DMAR/IOMMU kernel messages, NICs, and the
# populated IOMMU groups. Each section is prefixed with the command run.
echo "iommu boot kernel flag"
# grep the file directly (the original piped `cat` into grep needlessly)
grep iommu /etc/default/grub
echo " "
echo "dmesg | grep -e DMAR -e IOMMU"
dmesg | grep -e DMAR -e IOMMU
echo " "
echo "lspci -vnn|grep Ethernet"
lspci -vnn | grep Ethernet
echo " "
echo "ls -al /sys/kernel/iommu_groups"
ls -al /sys/kernel/iommu_groups
echo " "
echo "find /sys/kernel/iommu_groups/ -type l"
find /sys/kernel/iommu_groups/ -type l

dd測速BS  dd.sh:

#!/bin/bash
# dd.sh — measure sequential write throughput of the current filesystem
# across dd block sizes from 512 B to 64 MB.
# Usage: ./dd.sh [test-file-path]   (default: ./dd_obs_testfile)

# Since we're dealing with dd, abort if any errors occur
set -e

TEST_FILE=${1:-dd_obs_testfile}
# Remember whether the target already existed so cleanup only removes a
# file this script created.
TEST_FILE_EXISTS=0
if [ -e "$TEST_FILE" ]; then TEST_FILE_EXISTS=1; fi
TEST_FILE_SIZE=536870912   # 512 MiB written for every block size

# BUG FIX: the original tested `[ $EUID ]`, which is true for EVERY user
# (EUID is always set in bash). The warning therefore always printed, and
# the root-only cache drop below ran for non-root users too — where the
# failed redirect into /proc aborted the whole script under `set -e`.
if [ "$EUID" -ne 0 ]; then
  echo "NOTE: Kernel cache will not be cleared between tests without sudo. This will likely cause inaccurate results." 1>&2
fi

# Header
PRINTF_FORMAT="%8s : %s\n"
printf "$PRINTF_FORMAT" 'block size' 'transfer rate'

# Block sizes of 512b 1K 2K 4K 8K 16K 32K 64K 128K 256K 512K 1M 2M 4M 8M 16M 32M 64M
for BLOCK_SIZE in 512 1024 2048 4096 8192 16384 32768 65536 131072 262144 524288 1048576 2097152 4194304 8388608 16777216 33554432 67108864
do
  # Number of dd segments required to write TEST_FILE_SIZE bytes.
  COUNT=$((TEST_FILE_SIZE / BLOCK_SIZE))

  if [ "$COUNT" -le 0 ]; then
    echo "Block size of $BLOCK_SIZE estimated to require $COUNT blocks, aborting further tests."
    break
  fi

  # Clear kernel cache (root only) so each run measures the disk, not RAM.
  [ "$EUID" -eq 0 ] && [ -e /proc/sys/vm/drop_caches ] && echo 3 > /proc/sys/vm/drop_caches

  # dd reports throughput on stderr; discard stdout, capture stderr.
  DD_RESULT=$(dd if=/dev/zero of="$TEST_FILE" bs="$BLOCK_SIZE" count="$COUNT" conv=fsync 2>&1 1>/dev/null)

  # Extract the transfer rate from dd's STDERR output
  TRANSFER_RATE=$(echo "$DD_RESULT" | \grep --only-matching -E '[0-9.]+ ([MGk]?B|bytes)/s(ec)?')

  # BUG FIX: clean up only if the file did NOT exist beforehand (i.e. we
  # created it). The original condition was inverted relative to its own
  # comment: it deleted the user's pre-existing file and left ours behind.
  if [ "$TEST_FILE_EXISTS" -eq 0 ]; then rm "$TEST_FILE"; fi

  # Output the result
  printf "$PRINTF_FORMAT" "$BLOCK_SIZE" "$TRANSFER_RATE"
done

刪除所有zfs pool 快照 Killall_snapshot.sh:

# Killall_snapshot.sh — destroy EVERY ZFS snapshot in every imported pool.
# DESTRUCTIVE: there is no confirmation prompt.
# `-H -o name` emits one bare snapshot name per line, so no `cut` is
# needed; reading line-by-line avoids the word-splitting of the original
# backtick for-loop.
zfs list -H -t snapshot -o name | while IFS= read -r snapshot; do
  zfs destroy "$snapshot"
done
# Method 2: destroy only the snapshots of a single dataset.
zfs list -r -H -t snapshot -o name | grep 'vmimage/vm-104-disk-2' | xargs -n1 zfs destroy

ZFS Send.sh:

#!/bin/sh
# ZFS Send.sh — daily incremental replication of RAID/DATA and
# RAID/nextcloud to the RAID2 pool, keeping a rolling 8-day snapshot
# window. Progress is appended to $ZFSLOG.
#
# BUG FIX: all three date stamps now share ONE format. The original
# created snapshots with $TODAY as %Y%m%d but computed $YESTERDAY and
# $REMOVEDAY as %Y-%m-%d, so the incremental send and the cleanup always
# referenced snapshot names that were never created.
#
# NOTE(review): the published page mangled the snapshot names (email-
# obfuscation artifact, literal "[email protected]"); reconstructed here
# as dataset@DATE — confirm against `zfs list -t snapshot` on the host.
TODAY=$(date +'%Y-%m-%d')
YESTERDAY=$(date -d '1 day ago' +'%Y-%m-%d')
REMOVEDAY=$(date -d '8 day ago' +'%Y-%m-%d')
ZFSLOG=/var/log/zfs_backup.log

echo "---------------------------------------------------------------" >> $ZFSLOG
echo "$(date +%m/%d_%H:%M)      Backup begin" >> $ZFSLOG

# Daily snapshots that serve as the increment endpoints.
echo "$(date +%m/%d_%H:%M)      snapshot RAID/DATA" >> $ZFSLOG
zfs snapshot "RAID/DATA@$TODAY"

echo "$(date +%m/%d_%H:%M)      snapshot RAID/nextcloud" >> $ZFSLOG
zfs snapshot "RAID/nextcloud@$TODAY"

# Replicate yesterday->today increments to RAID2, in parallel.
echo "$(date +%m/%d_%H:%M)      start zfs send" >> $ZFSLOG
zfs send -i "RAID/DATA@$YESTERDAY" "RAID/DATA@$TODAY" | zfs recv -F RAID2/DATA &
zfs send -i "RAID/nextcloud@$YESTERDAY" "RAID/nextcloud@$TODAY" | zfs recv -F RAID2/nextcloud &

# BUG FIX: wait for both backgrounded send|recv pipelines to finish before
# pruning — the original destroyed snapshots while the receives could still
# be running.
wait

# Prune snapshots older than the 8-day window on the source pool...
echo "$(date +%m/%d_%H:%M)      destroy old snapshot RAID" >> $ZFSLOG
zfs destroy "RAID/DATA@$REMOVEDAY"
zfs destroy "RAID/nextcloud@$REMOVEDAY"

echo "$(date +%m/%d_%H:%M)      destroy old snapshot from RAID" >> $ZFSLOG

# ...and on the replica pool (zfs recv copies the snapshots over as well).
echo "$(date +%m/%d_%H:%M)      destroy old snapshot RAID2" >> $ZFSLOG
zfs destroy "RAID2/DATA@$REMOVEDAY"
# NOTE(review): the original pruned only one dataset on RAID2; without
# this line RAID2/nextcloud snapshots accumulate forever.
zfs destroy "RAID2/nextcloud@$REMOVEDAY"

SR-IOV vf on pfSense sh

#!/bin/bash
# Template for bringing up an SR-IOV physical function and assigning a
# VLAN/MAC to virtual function 0 (for passthrough to a pfSense guest).
# NOTE(review): <sriov pf> and the [bracketed] parts are placeholders —
# substitute real values before running; this snippet is NOT executable
# as-is.

ip link set <sriov pf> up
ip link set <sriov pf> vf 0 [vlan 200] mac 00:25:90:58:xx:xx

# Picks the interface whose name contains "enp"; assumes exactly one such
# interface exists — TODO confirm on multi-NIC hosts before renaming.
INTERFACE=`ip link|grep enp |  awk -F ':' '{print $2}'`
ip link set $INTERFACE name [new name]

mellanox 40GbE 網卡更新 OFED Driver

#!/bin/bash
# Install the Mellanox OFED driver (40GbE NIC) on Proxmox VE: download the
# OFED bundle, rebuild it against the running PVE kernel, temporarily
# remove proxmox-ve so the kernel modules can be swapped, then reinstall.
set -e   # destructive procedure — stop at the first failed step

# BUG FIX: the original hardcoded three mutually inconsistent version and
# distro strings (4.4-2.0.7.0 vs 4.3-1.0.1.0, debian9.4 vs debian9.5), so
# later steps operated on archives that were never downloaded. A single
# pair of variables keeps every step on the same bundle.
VER="4.4-2.0.7.0"
SRC="MLNX_OFED_LINUX-${VER}-debian9.4-x86_64"

mkdir -p /root/mellanox
cd /root/mellanox/
wget "http://content.mellanox.com/ofed/MLNX_OFED-${VER}/${SRC}.tgz"
tar zxvf "${SRC}.tgz"
cd "${SRC}/"
# Rebuild the OFED packages against the currently running kernel.
./mlnx_add_kernel_support.sh -m "/root/mellanox/${SRC}" --make-tgz
# mlnx_add_kernel_support.sh names its output after the *detected* distro
# (the original post saw debian9.5 here), so match it with a glob.
mv /tmp/MLNX_OFED_LINUX-"${VER}"-*-ext.tgz /root/mellanox/
cd /root/mellanox
tar zvxf MLNX_OFED_LINUX-"${VER}"-*-ext.tgz
# Keep a copy of the apt hook; removing proxmox-ve deletes its directory.
cp /usr/share/proxmox-ve/pve-apt-hook /root/

# Marker file that lets apt remove proxmox-ve without the hook refusing.
touch '/please-remove-proxmox-ve'
apt-get -y remove 'pve*'
mkdir -p /usr/share/proxmox-ve
mv /root/pve-apt-hook /usr/share/proxmox-ve
apt install -y pve-kernel-4.15.18-7-pve pve-headers pve-headers-4.15.18-7-pve
"/root/mellanox/$(ls -d MLNX_OFED_LINUX-${VER}-*-ext | head -n 1)"/mlnxofedinstall --skip-distro-check --force --without-fw-update --dkms
/etc/init.d/openibd restart
# BUG FIX: the original used '>', which wiped every other entry from
# /etc/modules; append instead.
echo "knem" >> /etc/modules
apt-get -y install proxmox-ve

systemctl stop pvestatd.service
systemctl start pvestatd.service
dpkg --configure pve-manager proxmox-ve

systemctl start corosync
systemctl start pve-cluster

getswap.sh

#!/bin/bash
# getswap.sh — print per-process swap usage, summed from the "Swap:" lines
# of /proc/<pid>/smaps (values are kB per the smaps format — presumably;
# confirm on the target kernel), plus an overall total. Needs root to read
# other users' smaps files; unreadable ones are silently counted as 0.
function getswap {
  local overall=0
  local dir pid progname sum
  # Numeric /proc entries are exactly the per-process directories — the
  # glob replaces the original find|egrep pipeline.
  for dir in /proc/[0-9]*; do
    pid=${dir##*/}
    progname=$(ps -p "$pid" -o comm --no-headers)
    # One awk pass sums every Swap line (the original forked grep+awk and
    # ran `let` once per line).
    sum=$(awk '/Swap/ { total += $2 } END { print total + 0 }' "$dir/smaps" 2>/dev/null)
    # Process may have exited between the glob and the read; treat as 0.
    sum=${sum:-0}
    echo "PID=$pid - Swap used: $sum - ($progname )"
    overall=$((overall + sum))
  done
  echo "Overall swap used: $overall"
}
getswap

./getswap.sh > pids.txt

grep kvm pids.txt


 

Share this Post