This commit is contained in:
Bob Slacker 2022-12-28 22:34:46 +01:00
parent e1b465da57
commit d05a5bf346
6 changed files with 171 additions and 13 deletions

View File

@ -31,6 +31,9 @@ kern.threads.max_threads_per_proc=4096
#kern.sched.slice=5
kern.coredump=0
###############################################
# FUSEfs Samba
vfs.usermount=1
###############################################
# Network
net.local.stream.recvspace=65536
net.local.stream.sendspace=65536

View File

@ -2,7 +2,7 @@
HISTFILE=~/.histfile
HISTSIZE=100000
SAVEHIST=1000000
setopt autocd beep extendedglob nomatch notify
setopt autocd beep extendedglob nomatch notify auto_pushd
bindkey -e
# End of lines configured by zsh-newuser-install
# The following lines were added by compinstall
@ -27,6 +27,9 @@ alias rd="most -w"
alias enw="emacs -nw"
alias manup="doas makewhatis /usr/local/man"
alias watch="cmdwatch"
alias dirs="dirs -v"
alias dz='zfs list -o space -t all -r'
alias tildeverse="ssh brs418@tilde.team"
# Add the line below to your .zshrc to enable auto suggestions.
source /usr/local/share/zsh-autosuggestions/zsh-autosuggestions.zsh

View File

@ -9,13 +9,16 @@ printexec() {
}
#if [[ $(date +%u) -eq 3 ]] || [[ $(date +%u) -eq 7 ]] ; then
printexec freebsd-update fetch install
printexec pkg update
printexec pkg upgrade
printexec pkg autoremove
printexec pkg audit -F -r
printexec pkg clean
printexec portsnap fetch update
printexec portmaster --clean-distfiles
printexec pkg stats
printexec freebsd-update fetch install
printexec pkg update
printexec pkg upgrade
printexec portsnap fetch update
printf "\e[91;1m * Done\e[0m\n"
exit 0
printf "\e[91;1m * Done\e[0m\n"
#fi
exit 0

125
home/beastie/dot-local/bin/zfsck Executable file
View File

@ -0,0 +1,125 @@
#!/bin/sh
#
# Calomel.org
# https://calomel.org/zfs_health_check_script.html
# FreeBSD ZFS Health Check script
# zfs_health.sh @ Version 0.18
# Check health of ZFS volumes and drives. On any faults send email.

# 99 problems but ZFS ain't one
problems=0

# Health - Check if all zfs volumes are in good condition. We are looking for
# any keyword signifying a degraded or broken array.
condition=$(/sbin/zpool status | grep -E -i '(DEGRADED|FAULTED|OFFLINE|UNAVAIL|REMOVED|FAIL|DESTROYED|corrupt|cannot|unrecover)')
if [ "${condition}" ]; then
	emailSubject="$(hostname) - ZFS pool - HEALTH fault"
	problems=1
fi

# Capacity - Make sure the pool capacity is below 80% for best performance. The
# percentage really depends on how large your volume is. If you have a 128GB
# SSD then 80% is reasonable. If you have a 60TB raid-z2 array then you can
# probably set the warning closer to 95%.
#
# ZFS uses a copy-on-write scheme. The file system writes new data to
# sequential free blocks first and when the uberblock has been updated the new
# inode pointers become valid. This method is true only when the pool has
# enough free sequential blocks. If the pool is at capacity and space limited,
# ZFS will be have to randomly write blocks. This means ZFS can not create an
# optimal set of sequential writes and write performance is severely impacted.
maxCapacity=80

if [ ${problems} -eq 0 ]; then
	capacity=$(/sbin/zpool list -H -o capacity | cut -d'%' -f1)
	# ${capacity} is intentionally unquoted: word-splitting yields one
	# percentage per pool.
	for line in ${capacity}; do
		if [ "$line" -ge $maxCapacity ]; then
			emailSubject="$(hostname) - ZFS pool - Capacity Exceeded"
			problems=1
		fi
	done
fi

# Errors - Check the columns for READ, WRITE and CKSUM (checksum) drive errors
# on all volumes and all drives using "zpool status". If any non-zero errors
# are reported an email will be sent out. You should then look to replace the
# faulty drive and run "zpool scrub" on the affected volume after resilvering.
if [ ${problems} -eq 0 ]; then
	errors=$(/sbin/zpool status | grep ONLINE | grep -v state | awk '{print $3 $4 $5}' | grep -v 000)
	if [ "${errors}" ]; then
		emailSubject="$(hostname) - ZFS pool - Drive Errors"
		problems=1
	fi
fi

# Scrub Expired - Check if all volumes have been scrubbed in at least the last
# 8 days. The general guide is to scrub volumes on desktop quality drives once
# a week and volumes on enterprise class drives once a month. You can always
# use cron to schedule "zpool scrub" in off hours. We scrub our volumes every
# Sunday morning for example.
#
# Scrubbing traverses all the data in the pool once and verifies all blocks can
# be read. Scrubbing proceeds as fast as the devices allows, though the
# priority of any I/O remains below that of normal calls. This operation might
# negatively impact performance, but the file system will remain usable and
# responsive while scrubbing occurs. To initiate an explicit scrub, use the
# "zpool scrub" command.
#
# The scrubExpire variable is in seconds. So for 8 days we calculate 8 days
# times 24 hours times 3600 seconds to equal 691200 seconds.
scrubExpire=691200

if [ ${problems} -eq 0 ]; then
	currentDate=$(date +%s)
	zfsVolumes=$(/sbin/zpool list -H -o name)
	# ${zfsVolumes} unquoted on purpose: one word per pool name.
	for volume in ${zfsVolumes}; do
		if [ "$(/sbin/zpool status "$volume" | grep -E -c "none requested")" -ge 1 ]; then
			printf 'ERROR: You need to run "zpool scrub %s" before this script can monitor the scrub expiration time.\n' "$volume"
			break
		fi
		# NOTE(review): 'break' here stops checking the REMAINING
		# volumes when one is mid-scrub/resilver; 'continue' may be
		# the intent — kept as-is to preserve upstream behavior.
		if [ "$(/sbin/zpool status "$volume" | grep -E -c "scrub in progress|resilver")" -ge 1 ]; then
			break
		fi
		# BUG FIX: upstream hardcoded "zroot" here, so every pool was
		# judged by zroot's scrub timestamp. Inspect the iterated pool.
		# NOTE(review): the awk field positions ($15/$12/$13) depend on
		# this FreeBSD release's "zpool status" scrub line layout —
		# verify after OS upgrades.
		scrubRawDate=$(/sbin/zpool status "$volume" | grep scrub | awk '{print $15 $12 $13}')
		scrubDate=$(date -j -f '%Y%b%e-%H%M%S' "${scrubRawDate}-000000" +%s)
		if [ $((currentDate - scrubDate)) -ge $scrubExpire ]; then
			emailSubject="$(hostname) - ZFS pool - Scrub Time Expired. Scrub Needed on Volume(s)"
			problems=1
		fi
	done
fi

# Email - On any problems send email with drive status information and
# capacities including a helpful subject line. Also use logger to write the
# email subject to the local logs. This is also the place you may want to put
# any other notifications like playing a sound file, beeping the internal
# speaker, paging someone or updating Nagios or even BigBrother.
#if [ "$problems" -ne 0 ]; then
#  printf '%s\n' "$emailSubject" "" "$(/sbin/zpool list)" "" "$(/sbin/zpool status)" | /usr/bin/mail -s "$emailSubject" root@localhost
#  logger $emailSubject
#fi

if [ "$problems" -ne 0 ]; then
	printf '%s\n' "$emailSubject" "" "$(/sbin/zpool list)" "" "$(/sbin/zpool status)"
else
	printf '%s\n' 'No problems found!'
fi

### EOF ###

View File

@ -6,6 +6,7 @@ bsdinfo
bsdisks
catfish
chromium
cmake
cmdwatch
conky
consolekit2
@ -19,32 +20,40 @@ drm-kmod
e2fsprogs
efivar
emacs
exfat-utils
feh
firefox
fortune-mod-freebsd-classic
freebsd-doc-en
freecad
fusefs-exfat
fusefs-ext2
fusefs-lkl
fusefs-ntfs
fusefs-ntfs-compression
fusefs-smbnetfs
fusefs-sshfs
gimp
git
glow
gmake
grub2-bhyve
gstreamer1-plugins-all
gtkglext
hexchat
hexchat-fish
homebank
htop
hw-probe
hwstat
ImageMagick7
inkscape
jq
keepassxc
lfm
librecad
libreoffice
libtool
libva-intel-driver
libvdpau-va-gl
lsblk
@ -62,7 +71,6 @@ ncurses
neofetch
netpbm
openvpn
palapeli
pciutils
pkg
pkg_tree
@ -78,9 +86,9 @@ qt5-style-plugins
rar
scrot
shotcut
starship
supertux2
supertuxkart
suyimazu
telegram-desktop
terminator
tigervnc-viewer
@ -93,8 +101,12 @@ vim
vm-bhyve
w3m-img
webcamd
webcamoid
wget
wifimgr
wine
wine-gecko
wine-mono
wireshark
wmwifi
wtf

View File

@ -6,6 +6,7 @@ bsdinfo
bsdisks
catfish
chromium
cmake
cmdwatch
conky
consolekit2
@ -19,32 +20,40 @@ drm-kmod
e2fsprogs
efivar
emacs
exfat-utils
feh
firefox
fortune-mod-freebsd-classic
freebsd-doc-en
freecad
fusefs-exfat
fusefs-ext2
fusefs-lkl
fusefs-ntfs
fusefs-ntfs-compression
fusefs-smbnetfs
fusefs-sshfs
gimp
git
glow
gmake
grub2-bhyve
gstreamer1-plugins-all
gtkglext
hexchat
hexchat-fish
homebank
htop
hw-probe
hwstat
ImageMagick7
inkscape
jq
keepassxc
lfm
librecad
libreoffice
libtool
libva-intel-driver
libvdpau-va-gl
lsblk
@ -62,7 +71,6 @@ ncurses
neofetch
netpbm
openvpn
palapeli
pciutils
pkg
pkg_tree
@ -78,9 +86,9 @@ qt5-style-plugins
rar
scrot
shotcut
starship
supertux2
supertuxkart
suyimazu
telegram-desktop
terminator
tigervnc-viewer
@ -93,8 +101,12 @@ vim
vm-bhyve
w3m-img
webcamd
webcamoid
wget
wifimgr
wine
wine-gecko
wine-mono
wireshark
wmwifi
wtf