ZFStools: Setting up automatic snapshots

root@lightspeed =>  /home => service mysql-server stop                                                        
Stopping mysql.
Waiting for PIDS: 1008.

 root@lightspeed =>  /home => zfs create -o mountpoint=none zroot/var/db                                        
 root@lightspeed =>  /home => zfs list                                             
NAME                       USED  AVAIL  REFER  MOUNTPOINT
zroot                     18.4G  1.66T    88K  /zroot
zroot/ROOT                11.0G  1.66T    88K  none
zroot/ROOT/default        11.0G  1.66T  11.0G  /
zroot/tmp                  532K  1.66T   532K  /tmp
zroot/usr                 2.48G  1.66T    88K  /usr
zroot/usr/home             861M  1.66T   268K  /usr/home
zroot/usr/home/matto       860M  1.66T   860M  /usr/home/matto
zroot/usr/home/syncthing   528K  1.66T   528K  /usr/home/syncthing
zroot/usr/ports           1021M  1.66T  1021M  /usr/ports
zroot/usr/src              661M  1.66T   661M  /usr/src
zroot/var                 4.01M  1.66T    88K  /var
zroot/var/audit             88K  1.66T    88K  /var/audit
zroot/var/crash             88K  1.66T    88K  /var/crash
zroot/var/db                88K  1.66T    88K  none
zroot/var/log              448K  1.66T   448K  /var/log
zroot/var/mail             128K  1.66T   128K  /var/mail
zroot/var/tmp             3.11M  1.66T  3.11M  /var/tmp
zroot/vms                 4.85G  1.66T  4.85G  /vms
zroot/vms/fbsd11-stable    104K  1.66T   104K  /vms/fbsd11-stable

 root@lightspeed =>  /home => mv /var/db/mysql /var/db/mysql-tmp                                                
 root@lightspeed =>  /home => zfs create -o mountpoint=/var/db/mysql zroot/var/db/mysql                         
 root@lightspeed =>  /home => zfs list | grep mysql

NAME                       USED  AVAIL  REFER  MOUNTPOINT
zroot/var/db               176K  1.66T    88K  none
zroot/var/db/mysql          88K  1.66T    88K  /var/db/mysql

 root@lightspeed =>  /home => rsync -avhrP /var/db/mysql-tmp/ /var/db/mysql                                     
sending incremental file list

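One thing the transcript doesn't show: after the copy, double-check that the new dataset is still owned by the mysql user before bringing the server back up, and only delete the -tmp copy once you're sure everything works. Something like this (paths and user name assume the stock FreeBSD MySQL port):

    chown -R mysql:mysql /var/db/mysql
    service mysql-server start
    # rm -rf /var/db/mysql-tmp   # only after verifying MySQL is happy
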
 root@lightspeed =>  ~mysql => cat /etc/crontab                                                                 
# /etc/crontab - root's crontab for FreeBSD
#
# $FreeBSD: head/etc/crontab 318443 2017-05-18 06:33:55Z ngie $
#
SHELL=/bin/sh
PATH=/etc:/bin:/sbin:/usr/bin:/usr/sbin
#
#minute hour    mday    month   wday    who     command
#
# Save some entropy so that /dev/random can re-seed on boot.
*/11    *       *       *       *       operator /usr/libexec/save-entropy
#
# Rotate log files every hour, if necessary.
0       *       *       *       *       root    newsyslog
#
# Perform daily/weekly/monthly maintenance.
1       3       *       *       *       root    periodic daily
15      4       *       *       6       root    periodic weekly
30      5       1       *       *       root    periodic monthly
#
# Adjust the time zone if the CMOS clock keeps local time, as opposed to
# UTC time.  See adjkerntz(8) for details.
1,31    0-5     *       *       *       root    adjkerntz -a
#
PATH=/etc:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
15,30,45 * * * * root /usr/local/sbin/zfs-auto-snapshot frequent  4
0        * * * * root /usr/local/sbin/zfs-auto-snapshot hourly   24
7        0 * * * root /usr/local/sbin/zfs-auto-snapshot daily     7
14       0 * * 7 root /usr/local/sbin/zfs-auto-snapshot weekly    4
28       0 1 * * root /usr/local/sbin/zfs-auto-snapshot monthly  12

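A quick note on those last five lines: the first argument is the snapshot label and the second is how many snapshots with that label to keep; zfs-auto-snapshot (this is the one from the sysutils/zfstools port) prunes the older ones as it rotates. You can run an entry by hand before trusting cron with it:

    /usr/local/sbin/zfs-auto-snapshot frequent 4
    zfs list -t snap | grep zfs-auto-snap_frequent
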
 root@lightspeed =>  ~mysql => zfs list                                                                         
NAME                       USED  AVAIL  REFER  MOUNTPOINT
zroot                     18.4G  1.66T    88K  /zroot
zroot/ROOT                11.0G  1.66T    88K  none
zroot/ROOT/default        11.0G  1.66T  11.0G  /
zroot/tmp                  544K  1.66T   544K  /tmp
zroot/usr                 2.48G  1.66T    88K  /usr
zroot/usr/home             861M  1.66T   268K  /usr/home
zroot/usr/home/matto       860M  1.66T   860M  /usr/home/matto
zroot/usr/home/syncthing   528K  1.66T   528K  /usr/home/syncthing
zroot/usr/ports           1021M  1.66T  1021M  /usr/ports
zroot/usr/src              661M  1.66T   661M  /usr/src
zroot/var                 5.68M  1.66T    88K  /var
zroot/var/audit             88K  1.66T    88K  /var/audit
zroot/var/crash             88K  1.66T    88K  /var/crash
zroot/var/db              1.75M  1.66T    88K  none
zroot/var/db/mysql        1.67M  1.66T  1.67M  /var/db/mysql
zroot/var/log              448K  1.66T   448K  /var/log
zroot/var/mail             128K  1.66T   128K  /var/mail
zroot/var/tmp             3.11M  1.66T  3.11M  /var/tmp
zroot/vms                 4.85G  1.66T  4.85G  /vms
zroot/vms/fbsd11-stable    104K  1.66T   104K  /vms/fbsd11-stable

 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=false zroot/var/log                                
 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=false zroot/var/mail                               
 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=false zroot/var/tmp                                
 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=false zroot/var/crash                              
 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=false zroot/var/audit                              
 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=false zroot/var                                    
 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=false zroot/usr/src                                
 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=false zroot/usr/ports                              
 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=false zroot/tmp                                    
 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=true zroot                                         

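These user properties inherit down the dataset tree, so the false on zroot/var covers every child that doesn't override it, as the next listing shows. If you change your mind about a dataset later, zfs inherit drops the local value and the dataset falls back to whatever its parent says, e.g.:

    zfs inherit com.sun:auto-snapshot zroot/usr/src
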
 root@lightspeed =>  ~mysql => zfs get -t filesystem all | grep auto-snapshot                                   
zroot                     com.sun:auto-snapshot  true                   local
zroot/ROOT                com.sun:auto-snapshot  true                   inherited from zroot
zroot/ROOT/default        com.sun:auto-snapshot  true                   inherited from zroot
zroot/tmp                 com.sun:auto-snapshot  false                  local
zroot/usr                 com.sun:auto-snapshot  true                   inherited from zroot
zroot/usr/home            com.sun:auto-snapshot  true                   inherited from zroot
zroot/usr/home/matto      com.sun:auto-snapshot  true                   inherited from zroot
zroot/usr/home/syncthing  com.sun:auto-snapshot  true                   inherited from zroot
zroot/usr/ports           com.sun:auto-snapshot  false                  local
zroot/usr/src             com.sun:auto-snapshot  false                  local
zroot/var                 com.sun:auto-snapshot  false                  local
zroot/var/audit           com.sun:auto-snapshot  false                  local
zroot/var/crash           com.sun:auto-snapshot  false                  local
zroot/var/db              com.sun:auto-snapshot  false                  inherited from zroot/var
zroot/var/db/mysql        com.sun:auto-snapshot  false                  inherited from zroot/var
zroot/var/log             com.sun:auto-snapshot  false                  local
zroot/var/mail            com.sun:auto-snapshot  false                  local
zroot/var/tmp             com.sun:auto-snapshot  false                  local
zroot/vms                 com.sun:auto-snapshot  true                   inherited from zroot
zroot/vms/fbsd11-stable   com.sun:auto-snapshot  true                   inherited from zroot

 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=true zroot/var/db/mysql                            
 root@lightspeed =>  ~mysql => zfs set com.sun:auto-snapshot=true zroot/var/db                                  

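A local property always beats an inherited one, so these two lines punch a hole in the false we set on zroot/var: everything under /var stays un-snapshotted except the MySQL data. To review just the values you've set explicitly, filter on the property source:

    zfs get -s local -t filesystem com.sun:auto-snapshot
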
  root@lightspeed =>  ~mysql => zfs list -o creation,name,used,refer,mountpoint -S used -t snap -r                                  
CREATION               NAME                                                               USED  REFER  MOUNTPOINT
Tue Mar 13 16:15 2018  zroot/ROOT/default@zfs-auto-snap_frequent-2018-03-13-16h15         284K  11.0G  -
Tue Mar 13 16:30 2018  zroot/ROOT/default@zfs-auto-snap_frequent-2018-03-13-16h30         264K  11.0G  -
Tue Mar 13 16:30 2018  zroot/usr/home/matto@zfs-auto-snap_frequent-2018-03-13-16h30       108K   860M  -
Tue Mar 13 16:30 2018  zroot@zfs-auto-snap_frequent-2018-03-13-16h30                         0    88K  -
Tue Mar 13 16:30 2018  zroot/usr/home@zfs-auto-snap_frequent-2018-03-13-16h30                0   268K  -
Tue Mar 13 16:30 2018  zroot/usr/home/syncthing@zfs-auto-snap_frequent-2018-03-13-16h30      0   528K  -
Tue Mar 13 16:30 2018  zroot/var/db/mysql@zfs-auto-snap_frequent-2018-03-13-16h30            0  1.67M  -
Tue Mar 13 16:30 2018  zroot/vms@zfs-auto-snap_frequent-2018-03-13-16h30                     0  4.85G  -
Tue Mar 13 16:30 2018  zroot/vms/fbsd11-stable@zfs-auto-snap_frequent-2018-03-13-16h30       0   104K  -

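And that's the payoff. If MySQL ever eats a table, you can fish files out of a snapshot via the hidden .zfs directory, or roll the whole dataset back (destructive: everything written after the snapshot is gone). For example, using the frequent snapshot from the listing above:

    ls /var/db/mysql/.zfs/snapshot/zfs-auto-snap_frequent-2018-03-13-16h30/
    # or, with mysqld stopped, roll back to the most recent snapshot:
    zfs rollback zroot/var/db/mysql@zfs-auto-snap_frequent-2018-03-13-16h30
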
DNS, Gateway, Router Setup for Bhyve & iocage: TOTAL CONTAINERIZATION

root@bean     1.15   0%   ~  cat /etc/pf.conf                                                                                                                              210

#

THINKS TO SELF: Hrm, why yes, that is a $BOOTAY_KICKING prompt! I need to document it actually…later…
# Instant NAT
nat pass on ix0 from {172.16.0.0/24} to any -> (ix0)

# Better NAT/RDR
# Define the interfaces
ext_if = "ix0"
int_if = "bridge0"
tcp_svcs = "{ 22 2200 80 443 5000:6000 8000:9001 10000 }"
#container_net = $int_if:network

# Define the IP address of containers & ports for rdr/nat
FNASVM = "172.16.0.230"
FNASVM_TCP_PORTS = "{ 80, 443 }"

# Normalize packets & pass anything in TCP_SVCS
#scrub in all

# Define the NAT for the containers
nat on $ext_if from $int_if to any -> ($ext_if)

# FREENAS VM: Redirect traffic on ports 8180 and 8443
rdr pass on $ext_if proto tcp from any to any port 8180 -> $FNASVM port 80
rdr pass on $ext_if proto tcp from any to any port 8443 -> $FNASVM port 443

# Hrm, maybe quick is too fast
#pass in quick on $ext_if proto tcp from any to any port $tcp_svcs
pass in on $ext_if proto tcp from any to any port $tcp_svcs

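Before a ruleset like this goes live, let pfctl syntax-check it, then load it and eyeball the translation rules it actually installed (standard pfctl invocations):

    pfctl -nf /etc/pf.conf    # -n parses the file without loading it
    pfctl -f /etc/pf.conf     # load the ruleset
    pfctl -s nat              # show the active nat/rdr rules
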
Managing FreeBSD Bhyve Containers With VM-BHYVE

SNAPSHOTS, CLONES, AND ROLLBACKS, OH MY!

Before we get started, my tasty friends (yes, hungry!), let me tell you, I am unfathomably proud of myself for making the graphic for this in Gimp. Yeah, it only took me like 2.5 hours. *blows on nails* I’m that good.

Ok, one of the awesome FreeBSD tools I use frequently is vm-bhyve, whose snapshot and clone subcommands look like this:

 vm clone name[@snapshot] new-name
 vm snapshot [-f] name|name@snapshot

Later, if you like, you can restore a previous snapshot of your VM:

    vm rollback [-r] <name@snapshot>

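For example, to spin up a throwaway copy of a VM from one of its snapshots (the VM and snapshot names here are just for illustration):

    vm clone fnas11vm@2018-01-02-12:38:07 fnas11vm-test
    # or roll the original back to that point in time:
    vm rollback fnas11vm@2018-01-02-12:38:07
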
VM-BHYVE SNAPSHOT: Easy as Pie 😉

It’s best to make sure the container is powered off:

    vm poweroff $name

Now, we can make the snapshot…

    root@bean   ~  vm snapshot fnas11vm                           2089

“Trust But Verify”

— Ronald Reagan

    root@bean   ~  zfs list -t snap | grep fnas11vm        1 ↵     2090

    NAME                                            USED  AVAIL  REFER  MOUNTPOINT
    zroot/vm/fnas11vm@2018-01-02-12:38:07              0      -    96K  -
    zroot/vm/fnas11vm/disk0@2018-01-02-12:38:07        0      -  1.21G  -
    zroot/vm/fnas11vm/disk1@2018-01-02-12:38:07        0      -  7.53M  -

Creating an image from the container for provisioning more containers!

    root@bean   ~  vm image create -d 'fnas11_image' fnas11vm        2099

    Creating a compressed image, this may take some time... 
    Image of fnas11vm created with UUID 650759c6-efff-11e7-8013-0cc47ac2a6ec

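That image lands in the images directory of your vm-bhyve datastore, and vm-bhyve can stamp out new guests from it; the UUID is the one printed above:

    vm image list
    vm image provision 650759c6-efff-11e7-8013-0cc47ac2a6ec fnas11vm-clone2
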
FIGHTING WITH FONTS!? REALLY!? Ok, this is BadASSDOM!

Sweet FreeBSD ZSH/POWERLEVEL9K CONSOLE PROMPT WITH AN OS ICON

Movie Reference: Knights of Badassdom

The Goal: [screenshot of the finished prompt, OS icon and all]

Requirements:
  • powerline-status
  • powerline-fonts
  • patience of Gandhi
  • tenacity of a door-to-door salesperson

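Getting the first two requirements onto FreeBSD looks roughly like this; the package names are my best guess for the era, so check what your release actually ships:

    pkg search powerline
    pkg install powerline-fonts py36-powerline-status
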
    > vi ~/.kde4/share/apps/konsole/Shell.profile

:!ls /usr/local/share/fonts/Droid/Droid\ Sans\ Mono\ for\ Powerline\ Nerd\ Font\ Complete.otf                                   

 .k/s/a/k/Shell.profile+                                                                                                   
[Appearance]
AntiAliasFonts=true

ColorScheme=GreenOnBlack

 #Font=Source Code Pro for Powerline,15,-1,5,63,0,0,0,0,0
 Font=Droid Sans Mono for Powerline Nerd Font Complete,15,-1,5,63,0,0,0,0,0

Mirrored ZPOOL with ZFS Boot on Root FAIL … and Fix

matto@spock: cat /projects_share/docs/zfs_zpool-attachdrive.txt

iX SpecOps: MIRRORED ZPOOL ROOT/BOOT FIX

Description: SINGLE VDEV BOOT ZPOOL FOR PRODUCTION SYSTEM

The problem with Prometheus is that ada4 had another pool on it, tank1; hence the tank2 zpool name, I suspect. Because somebody did not destroy this old root zpool and its bootcode when the Intel ada0/ada1 pair got its GPT partitioning and bootcode, the system booted from the broken ada3s2 partition whenever the Intel RAID broke or was not discovered first.

In fact, chances are this occurred many times and on nearly every device in the system, which is full of unmatched drives of various sizes with no apparent plan.

        zpool import                                     
           pool: tank1
             id: 13323130829716330915
          state: ONLINE
         status: The pool was last accessed by another system.
         action: The pool can be imported using its name or numeric identifier and
             the '-f' flag.
           see: http://illumos.org/msg/ZFS-8000-EY
         config:

                tank1       ONLINE
                  ada4p2    ONLINE

The Fix:

Attaching the device to the existing 30GB zpool, called tank2, was the correct first instinct, and various permutations of this approach were executed.

In addition, several methods were used to copy the existing partitions onto the second disk. This failed in the same fashion as directly attaching the drive to the existing zpool.

The errors seemed of little help.

        root@prometheus#> gpart backup /dev/ada3 | \
          gpart restore /dev/ada4
          gpart: geom 'ada4': Operation not permitted

        root@prometheus#> gpart backup /dev/ada3
          GPT 152
          1   freebsd-boot       40      512
          2   freebsd-zfs      552 58306560
          3   freebsd-swap 58307112  4194304

        root@prometheus#> gpart backup /dev/ada3 | \
          gpart restore /dev/ada4
        : Operation not permitted
        root@prometheus#> zpool attach $POOL_NAME ada3s2 ada4s2
        : Operation not permitted  # AND SO ON AND SO FORTH

## UNTIL:

        root@prometheus#> sysctl kern.geom.debugflags=0x10
        kern.geom.debugflags: 0 -> 16

        root@prometheus#> zpool attach -f tank2 /dev/ada3p2 /dev/ada4p2

        Make sure to wait until resilver is done before rebooting.

        If you boot from pool 'tank2', you may need to update boot code on
        newly attached disk '/dev/ada4p2'.

        root@prometheus#> gpart bootcode -b /boot/pmbr -p /boot/gptzfsboot -i 1 ada4
        partcode written to ada4p1
        bootcode written to ada4
#################################### SUCCESS! ####################################
        zpool status tank2
        zsh: correct 'tank2' to 'tank' [nyae]? n
          pool: tank2
         state: ONLINE
        status: One or more devices is currently being resilvered.  The pool will
                continue to function, possibly in a degraded state.
        action: Wait for the resilver to complete.
          scan: resilver in progress since Thu Dec 14 19:25:29 2017
                31.2M scanned out of 23.1G at 201K/s, 33h26m to go
                30.2M resilvered, 0.13% done
        config:

                NAME        STATE     READ WRITE CKSUM
                tank2       ONLINE       0     0     0
                  mirror-0  ONLINE       0     0     0
                    ada3p2  ONLINE       0     0     0
                    ada4p2  ONLINE       0     0     0  (resilvering)
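
Once the resilver completes and you've verified the box boots from either disk, flip the GEOM foot-shooting switch back off:

    sysctl kern.geom.debugflags=0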