#! /usr/bin/env perl

#    snapShot: Manage ZFS snapshots
#    see http://wiki.linuxservertech.com for additional information
#    Copyright (C) 2022  R. W. Rodolico
#
#    version 1.0, 20220423
#       Initial Release
#
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Warning: this script requires the non-standard Perl modules YAML::Tiny and Hash::Merge::Simple
# Under Debian:  apt install libyaml-tiny-perl libhash-merge-simple-perl
# Under FreeBSD: cpan -i Hash::Merge::Simple YAML::Tiny
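#
# The configuration is read from snapShot.yaml in the same directory as the
# script. The following is an illustrative sketch of the keys the code below
# expects; the key names are taken from the code, but the values (dataset
# names, regex, template) are examples only, not shipped defaults:
#
#   TESTING: 1            # if true, commands are written to /tmp/snapShot instead of run
#   slop: 1 hour          # allow a new snapshot this much before the frequency strictly requires it
#   listingKeys:          # columns of `zfs list`, in order
#     - name
#     - used
#     - avail
#     - refer
#     - mountpoint
#   snapshot:
#     template: '%Y%m%d%H%M'                       # strftime format appended after the '@'
#     parse: '(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})' # regex to recover the date from a snapshot name
#     parseFields:                                 # names for the capture groups above
#       - year
#       - month
#       - day
#       - hour
#       - minute
#   datasets:
#     tank/data:          # keys are regexes matched against dataset names
#       recursive: 1
#       frequency: 1 day
#       retention: 1 week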


use strict;
use warnings;

use Data::Dumper;
use Time::Local;
use POSIX qw(strftime);
use YAML::Tiny; # Debian: apt install libyaml-tiny-perl, BSD systems: cpan -i YAML::Tiny
use Hash::Merge::Simple qw/ merge clone_merge /; # apt install libhash-merge-simple-perl or cpan -i Hash::Merge::Simple


# globals
my $CONFIG_FILE_NAME = 'snapShot.yaml';

# This will be read in from snapShot.yaml
my $config;

#
# find where the script is actually located, since the config file should be there
#
sub getScriptLocation {
   use strict;
   use File::Spec::Functions qw(rel2abs);
   use File::Basename;
   return dirname(rel2abs($0));
}

#
# Read the configuration file from the script's location and merge it
# into $config. Returns 1 on success, 0 if the file does not exist.
#
sub readConfig {
   my $scriptLocation = &getScriptLocation();
   if ( -e "$scriptLocation/$CONFIG_FILE_NAME" ) {
      my $yaml = YAML::Tiny->read( "$scriptLocation/$CONFIG_FILE_NAME" );
      # use clone_merge to merge conf file into $config
      # overwrites anything in $config if it exists in the config file
      $config = clone_merge( $config, $yaml->[0] );
      return 1;
   }
   return 0;
}


# parse a single line from the output of `zfs list [-t snapshot]`
sub parseListing {
   my ($line,$keys) = @_;
   chomp $line;
   my %values;
   @values{@$keys} = split( /\s+/, $line );
   return \%values;
}      
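
# For example, assuming listingKeys of name, used, avail, refer and mountpoint,
# the line "tank/data  1.21G  96.5G  1.19G  /tank/data" would yield
#   { name => 'tank/data', used => '1.21G', avail => '96.5G',
#     refer => '1.19G', mountpoint => '/tank/data' }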


# parse the date out of each snapshot name and store the resulting values
# in the snapshot's {'date'} hash
sub parseSnapshots {
   my ( $snapShots, $config) = @_;
   my $keys = $config->{'snapshot'}->{'parseFields'};
   foreach my $snapShot ( keys %$snapShots ) {
      my %temp;
      # run the regex, capture the output to an array, then populate the hash %temp
      # using the regex results as the values, and $keys as the keys
      @temp{@$keys} = ( $snapShot =~ m/$config->{'snapshot'}->{'parse'}/ );
      # while we're here, calculate the unix time (epoch). NOTE: month is 0 based
      $temp{'unix'} = timelocal( 0,$temp{'minute'},$temp{'hour'},$temp{'day'},$temp{'month'}-1,$temp{'year'} );
      # put this into our record
      $snapShots->{$snapShot}->{'date'} = \%temp;
   }
}
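
# For example, with parseFields of year, month, day, hour, minute and a parse
# regex of (\d{4})(\d{2})(\d{2})(\d{2})(\d{2}) (illustrative values; the real
# ones come from snapShot.yaml), the snapshot 'tank/data@202204231330' gets
#   date => { year => '2022', month => '04', day => '23', hour => '13',
#             minute => '30', unix => <epoch seconds for 2022-04-23 13:30 local time> }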
      
# run $command, then parse its output and return the results as a hashref
# $command is one of zfs list or zfs list -t snapshot
# In other words, get all datasets/volumes or get all snapshots
sub getListing {
   my ($configuration, $regex, $command )  = @_;
   my %dataSets;

   # get all datasets/volumes or snapshots
   my @zfsList = `$command`;
   foreach my $thisSet ( @zfsList ) {
      # parse the line into its portions. The only one we use right now is name
      my $temp = &parseListing( $thisSet, $configuration->{'listingKeys'} );
      if (  $temp->{'name'} =~ m/^($regex)$/ ) { # it matches the regex we're using, so save it
         $dataSets{$temp->{'name'}} = $temp;
      }
   }
   return \%dataSets; # return all entries we are looking for
}

# convert something like '1 day' to a number of seconds (86400) for date math.
# Month and year are approximations (30.5 days per month, 365.2425 days per year);
# for those, int() is used to truncate the result back to an integer.
sub period2seconds {
   my ($count, $unit) = ( shift =~ m/\s*(\d+)\s*([a-z]+)\s*/i );
   $unit = lc $unit;
   if ( $unit eq 'hour' ) {
      $count *= 3600;
   } elsif ( $unit eq 'day' ) {
      $count *= 86400;
   } elsif ( $unit eq 'week' ) {
      $count *= 86400 * 7;
   } elsif ( $unit eq 'month' ) {
      $count *= int( 86400 * 30.5 );
   } elsif ( $unit eq 'year' ) {
      $count *= int( 86400 * 365.2425 );
   } else {
      die "Unknown units [$unit] in period2seconds\n";
   }
   return $count;
}
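
# For example (with the constants above):
#   period2seconds( '1 day' )  returns 86400
#   period2seconds( '2 hour' ) returns 7200
#   period2seconds( '1 week' ) returns 604800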

# Merge the snapshots and the matching configuration values (recursive, frequency,
# retention) into the datasets hash. After this, $config and $snapshots should no longer be needed.
sub mergeData {
   my ($datasets,$snapshots,$config) = @_;
   my $confKeys = $config->{'datasets'};
   foreach my $thisDataset ( keys %$datasets ) {
      # go through each configuration entry and see if we match the current dataset
      foreach my $conf (keys %$confKeys ) {
         if ( $thisDataset =~ m/^$conf$/ ) { # found it, so store the configuration values into the dataset
            $datasets->{$thisDataset}->{'recursive'} = $confKeys->{$conf}->{'recursive'};
            $datasets->{$thisDataset}->{'frequency'} = &period2seconds( $confKeys->{$conf}->{'frequency'} );
            $datasets->{$thisDataset}->{'retention'} = &period2seconds( $confKeys->{$conf}->{'retention'} );
            last; # there is only one, so no need to process any more for this configuration key
         } # if
      } # foreach
      # do the same for the snapshots we found; bind them to the data set
      foreach my $snapshot ( keys %$snapshots ) {
         if ( $snapshot =~ m/^$thisDataset@/ ) { # this is a match
            # copy the snapshot into the dataset
            $datasets->{$thisDataset}->{'snapshots'}->{$snapshot} = $snapshots->{$snapshot};
            # track the latest snapshot (we use this to decide whether it is time to add a new one)
            $datasets->{$thisDataset}->{'lastSnap'} = $snapshots->{$snapshot}->{'date'}->{'unix'}
               if ! defined( $datasets->{$thisDataset}->{'lastSnap'} ) || $datasets->{$thisDataset}->{'lastSnap'} < $snapshots->{$snapshot}->{'date'}->{'unix'};
            # delete the snapshot, to free up memory
            delete $snapshots->{$snapshot};
         } # if
      } # foreach
   } # foreach
} # sub mergeData
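
# After mergeData, each dataset entry looks roughly like this (illustrative values):
#   'tank/data' => {
#      name       => 'tank/data',   # plus the other listingKeys columns
#      recursive  => 1,
#      frequency  => 86400,         # seconds
#      retention  => 604800,        # seconds
#      lastSnap   => <epoch of the newest matching snapshot>,
#      snapshots  => { 'tank/data@202204231330' => { ... }, ... },
#   }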
   

# check whether each snapshot is ready to be destroyed, i.e. its age exceeds the retention period.
# If $recursive is true, add '-r' to the command to do a recursive destroy
sub checkRetention {
   my ( $retentionPeriod, $recursive, $snapshots, $now ) = @_;
   my @toDelete; # an array of destroy commands
   foreach my $thisSnapshot ( keys %$snapshots ) {
      # print "checking $thisSnapshot\n\tNow: $now\n\tDate: $snapshots->{$thisSnapshot}->{date}->{unix}\n\tRetention: $retentionPeriod\n\n";
      if ( $now - $snapshots->{$thisSnapshot}->{'date'}->{'unix'} > $retentionPeriod ) { # it is too old
         push ( @toDelete, ( 'zfs destroy ' . ($recursive ? '-r ' : '') . $thisSnapshot ) ); # list it to be destroyed
      }
   }
   return @toDelete; # just return the list of destroy commands to be executed
}   
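
# Each element returned is a complete destroy command, e.g. (hypothetical name)
#   zfs destroy -r tank/data@202203010000
# Nothing is executed here; run() decides whether to execute or just log them.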


# just return the command to create a new snapshot. Very simple, but I wanted the code to be isolated in case something needed
# to change. Basically, zfs snapshot [-r] datasetname@template
sub makeSnapshot {
   my ( $datasetName, $recursive, $snapshotName ) = @_;
   return 
      'zfs snapshot ' . 
      ($recursive ? '-r ' : '') . 
      $datasetName . $snapshotName;
}
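
# For example, makeSnapshot( 'tank/data', 1, '@202204231330' ) returns
#   'zfs snapshot -r tank/data@202204231330'
# (note that $snapshotName already carries the leading '@', see below)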

# this is the biggie; everything leads to here. We will take every dataset/volume we found, and decide whether some old snapshots
# need to be destroyed, and whether a new snapshot needs to be created.
sub process {
   my ( $datasets, $now, $snapshotName, $slop ) = @_;
   my @toDelete; # will hold all the destroy commands
   my @toAdd; # will hold all the create commands
   
   foreach my $thisDataset ( keys %$datasets ) { # Look at each dataset/volume in turn
      # if any snapshots need to be destroyed, add them to @toDelete
      push( @toDelete, 
         &checkRetention( 
         $datasets->{$thisDataset}->{'retention'}, 
         $datasets->{$thisDataset}->{'recursive'}, 
         $datasets->{$thisDataset}->{'snapshots'}, 
         $now )
         );
      # if it is time to add a new snapshot, add it to @toAdd
      # default lastSnap to 0 (the epoch) when the dataset has no snapshots yet
      if ( ( $datasets->{$thisDataset}->{'lastSnap'} // 0 ) + $datasets->{$thisDataset}->{'frequency'} - $slop < $now ) {
         push @toAdd, &makeSnapshot( $thisDataset, $datasets->{$thisDataset}->{'recursive'}, $snapshotName )
      }
   }
   # return the actions, deletions first, adds second (executed in that order)
   return ( @toDelete, @toAdd );
}   

# Run zero or more commands.
# The first argument is a flag for testing; everything after it is an ordered
# list of commands to be executed. If any command fails, the remaining commands
# are skipped and the accumulated output (including error details) is returned.
sub run {
   my $testing = shift;
   return 0 unless @_; # bail if there are no commands to run
   if ( $testing ) { # don't do it, just dump the commands to /tmp/snapShot
      open my $log, '>', '/tmp/snapShot' or die "could not write to /tmp/snapShot: $!\n";
      print $log join( "\n", @_ ) . "\n";
      close $log;
   } else { # actually execute the commands
      my $out; # capture all output
      while ( my $command = shift ) { # for each command on the stack
         $out .= `$command` . "\n"; # add it to $out
         if ( $? ) { # we had an error; add debugging text, then end the program
            $out .= "Error executing command\n\t$command\n\t";
            if ($? == -1) {
                $out .= "failed to execute $command: $!";
            } elsif ($? & 127) {
                $out .= sprintf( "child died with signal %d, %s coredump", ($? & 127),  ($? & 128) ? 'with' : 'without' );
            } else {
                $out .= sprintf( "child exited with value %d", $? >> 8 );
            }
            $out .= "\n";
            return $out;
         }
      }
   }
   return 0; # we succeeded
}
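
# When TESTING is set, the generated commands are simply written one per line
# to /tmp/snapShot, e.g. (hypothetical names):
#   zfs destroy -r tank/data@202203010000
#   zfs snapshot -r tank/data@202204231330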

&readConfig() or die "Could not read config file: $!\n";

# we're pre-calculating some things so we don't do it over and over for each entry
# grab the time once
my $now = time;
# create the string to be used for all snapshots, using $now and the template provided
my $snapshotName = '@' . strftime($config->{'snapshot'}->{'template'},localtime $now);
# Create the dataset regex by joining all of the dataset regexes defined in the config.
$config->{'dataset_regex'} = '(' . join( ')|(', keys %{ $config->{'datasets'} }  ) . ')' unless $config->{'dataset_regex'};
#print $config->{'dataset_regex'} . "\n";
$config->{'snapshot_regex'} = '(' . $config->{'dataset_regex'} . ')@' . $config->{'snapshot'}->{'parse'};
#print $config->{'snapshot_regex'} . "\n\n";
   
#die Dumper( $config ) . "\n";   
# first, find all datasets which match our keys
my $dataSets = &getListing( $config, $config->{'dataset_regex'}, 'zfs list'  );
# and, find all snapshots that match
my $snapshots = &getListing( $config, $config->{'snapshot_regex'}, 'zfs list -t snapshot'  );
# get the date/time of the snapshots and store them in the hash
&parseSnapshots($snapshots, $config );
# merge the snapshots into the datasets for convenience
&mergeData( $dataSets, $snapshots, $config );
# Now, let's do the actual processing
my @commands  = &process( $dataSets, $now, $snapshotName, &period2seconds( $config->{'slop'} ) );
#print join ( "\n", @commands ) . "\n";
my $errors = &run( $config->{'TESTING'}, @commands );
print "Error: $errors\n" if $errors;

# print Dumper( $dataSets );
#print Dumper( $snapshots );

#print join ("\n", sort keys( %$dataSets ) ) . "\n\n";
#print join( "\n", sort keys( %$snapshots ) ) . "\n";

1;