diff options
Diffstat (limited to 'handlers')
| -rw-r--r-- | handlers/dup.helper.in | 2 | ||||
| -rw-r--r-- | handlers/dup.in | 99 | ||||
| -rw-r--r-- | handlers/ldap.in | 2 | ||||
| -rw-r--r-- | handlers/mysql.in | 2 | ||||
| -rw-r--r-- | handlers/pgsql.helper.in | 30 | ||||
| -rw-r--r-- | handlers/pgsql.in | 104 | ||||
| -rw-r--r-- | handlers/rdiff.in | 10 | ||||
| -rw-r--r-- | handlers/rsync.in | 42 | 
8 files changed, 185 insertions, 106 deletions
| diff --git a/handlers/dup.helper.in b/handlers/dup.helper.in index e985c5e..12331a3 100644 --- a/handlers/dup.helper.in +++ b/handlers/dup.helper.in @@ -179,7 +179,7 @@ do_dup_gpg_signkey() {        dup_gpg_onekeypair=no     fi -   if [ "$dup_gpg_onekeypair" == "no" }; then +   if [ "$dup_gpg_onekeypair" == "no" ]; then        # signkey ?        REPLY=        while [ -z "$REPLY" -o -z "$dup_gpg_signkey" ]; do diff --git a/handlers/dup.in b/handlers/dup.in index 5216643..e3475b8 100644 --- a/handlers/dup.in +++ b/handlers/dup.in @@ -2,7 +2,7 @@  # vim: set filetype=sh sw=3 sts=3 expandtab autoindent:  #  # duplicity script for backupninja -# requires duplicity +# requires duplicity >= 0.4.4, and >= 0.4.9 when using a custom tmpdir.  #  getconf options @@ -106,35 +106,14 @@ duplicity_major="`echo $duplicity_version | @AWK@ -F '.' '{print $1}'`"  duplicity_minor="`echo $duplicity_version | @AWK@ -F '.' '{print $2}'`"  duplicity_sub="`echo $duplicity_version | @AWK@ -F '.' '{print $3}'`" -### ssh/scp/sftp options -# 1. duplicity >= 0.4.2 needs --sftp-command -#    (NB: sftp does not support the -l option) -# 2. duplicity 0.4.3 to 0.4.9 replace --ssh-command with --ssh-options, which is -#    passed to scp and sftp commands by duplicity. We don't use it: since this -#    version does not use the ssh command anymore, we keep compatibility with -#    our previous config files by passing $sshoptions to --scp-command and -#    --sftp-command ourselves - +### ssh/scp/sftp options (duplicity < 0.4.3 is unsupported)  scpoptions="$sshoptions"  if [ "$bandwidthlimit" != 0 ]; then     [ -z "$desturl" ] || warning 'The bandwidthlimit option is not used when desturl is set.'     
scpoptions="$scpoptions -l $bandwidthlimit"  fi - -# < 0.4.2 : only uses ssh and scp -if [ "$duplicity_major" -le 0 -a "$duplicity_minor" -le 4 -a "$duplicity_sub" -lt 2 ]; then -   execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --ssh-command 'ssh $sshoptions'" -# >= 0.4.2 : also uses sftp, --sftp-command option is now supported -else -   sftpoptions="$sshoptions" -   # == 0.4.2 : uses ssh, scp and sftp -   if [ "$duplicity_major" -eq 0 -a "$duplicity_minor" -eq 4 -a "$duplicity_sub" -eq 2 ]; then -      execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --sftp-command 'sftp $sftpoptions' --ssh-command 'ssh $sshoptions'" -   # >= 0.4.3 : uses only scp and sftp, --ssh-command option is not supported anymore -   else -      execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --sftp-command 'sftp $sftpoptions'" -   fi -fi +sftpoptions="$sshoptions" +execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --sftp-command 'sftp $sftpoptions'"  ### Symmetric or asymmetric (public/private key pair) encryption  if [ -n "$encryptkey" ]; then @@ -162,12 +141,7 @@ fi  # full backup.  # If incremental==no, force a full backup anyway.  if [ "$incremental" == "no" ]; then -   # before 0.4.4, full was an option and not a command -   if [ "$duplicity_major" -le 0 -a "$duplicity_minor" -le 4 -a "$duplicity_sub" -lt 4 ]; then -      execstr_options="${execstr_options} --full" -   else -      execstr_command="full" -   fi +   execstr_command="full"  else     # we're in incremental mode     if [ "$increments" != "keep" ]; then @@ -185,7 +159,6 @@ if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 6 -a "$duplicity_sub" -g  fi  ### Temporary directory -precmd=  if [ -n "$tmpdir" ]; then     if [ ! -d "$tmpdir" ]; then        info "Temporary directory ($tmpdir) does not exist, creating it." 
@@ -194,7 +167,7 @@ if [ -n "$tmpdir" ]; then        chmod 0700 "$tmpdir"     fi     info "Using $tmpdir as TMPDIR" -   precmd="${precmd}TMPDIR=$tmpdir " +   execstr_options="${execstr_options} --tempdir '$tmpdir'"  fi  ### Archive directory @@ -211,10 +184,6 @@ if [ "$keep" != "yes" ]; then     if [ "`echo $keep | tr -d 0-9`" == "" ]; then        keep="${keep}D"     fi -   # before 0.4.4, remove-older-than was an option and not a command -   if [ "$duplicity_major" -le 0 -a "$duplicity_minor" -le 4 -a "$duplicity_sub" -lt 4 ]; then -      execstr_options="${execstr_options} --remove-older-than $keep" -   fi  fi  ### Source @@ -269,56 +238,52 @@ fi  ### Cleanup commands (duplicity >= 0.4.4)  # cleanup -if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 4 -a "$duplicity_sub" -ge 4 ]; then -   debug "$precmd duplicity cleanup --force $execstr_options $execstr_serverpart" +debug "duplicity cleanup --force $execstr_options $execstr_serverpart" +if [ ! $test ]; then +   export PASSPHRASE=$password +   export FTP_PASSWORD=$ftp_password +   output=`nice -n $nicelevel \ +             su -c \ +             "duplicity cleanup --force $execstr_options $execstr_serverpart 2>&1"` +   exit_code=$? +   if [ $exit_code -eq 0 ]; then +      debug $output +      info "Duplicity cleanup finished successfully." +   else +      debug $output +      warning "Duplicity cleanup failed." +   fi +fi + +# remove-older-than +if [ "$keep" != "yes" ]; then +   debug "duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart"     if [ ! $test ]; then        export PASSPHRASE=$password        export FTP_PASSWORD=$ftp_password        output=`nice -n $nicelevel \ -         su -c \ -         "$precmd duplicity cleanup --force $execstr_options $execstr_serverpart 2>&1"` +                su -c \ +                "duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart 2>&1"`        exit_code=$?        
if [ $exit_code -eq 0 ]; then           debug $output -         info "Duplicity cleanup finished successfully." +         info "Duplicity remove-older-than finished successfully."        else           debug $output -         warning "Duplicity cleanup failed." -      fi -   fi -fi - -# remove-older-than -if [ "$keep" != "yes" ]; then -   if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 4 -a "$duplicity_sub" -ge 4 ]; then -      debug "$precmd duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart" -      if [ ! $test ]; then -         export PASSPHRASE=$password -         export FTP_PASSWORD=$ftp_password -         output=`nice -n $nicelevel \ -                   su -c \ -                      "$precmd duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart 2>&1"` -         exit_code=$? -         if [ $exit_code -eq 0 ]; then -            debug $output -            info "Duplicity remove-older-than finished successfully." -         else -            debug $output -            warning "Duplicity remove-older-than failed." -         fi +         warning "Duplicity remove-older-than failed."        fi     fi  fi  ### Backup command -debug "$precmd duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart" +debug "duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart"  if [ ! $test ]; then     outputfile=`maketemp backupout`     export PASSPHRASE=$password     export FTP_PASSWORD=$ftp_password     output=`nice -n $nicelevel \               su -c \ -                "$precmd duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart >$outputfile 2>&1"` +                "duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart >$outputfile 2>&1"`     exit_code=$?     
debug $output     cat $outputfile | (while read output ; do diff --git a/handlers/ldap.in b/handlers/ldap.in index 83307ee..600f172 100644 --- a/handlers/ldap.in +++ b/handlers/ldap.in @@ -91,7 +91,7 @@ if [ "$ldif" == "yes" ]; then              execstr="$execstr > $dumpdir/$dbsuffix.ldif"           fi           debug "$execstr" -         output=`su root -c "set -o pipefail ; $execstr" 2>&1` +         output=`su root -s /bin/bash -c "set -o pipefail ; $execstr" 2>&1`           code=$?           if [ "$code" == "0" ]; then              debug $output diff --git a/handlers/mysql.in b/handlers/mysql.in index 0282046..05ea396 100644 --- a/handlers/mysql.in +++ b/handlers/mysql.in @@ -303,7 +303,7 @@ then        debug "su $user -c \"$execstr\""        if [ ! $test ]        then -         output=`su $user -c "set -o pipefail ; $execstr" 2>&1` +         output=`su $user -s /bin/bash -c "set -o pipefail ; $execstr" 2>&1`           code=$?           if [ "$code" == "0" ]           then diff --git a/handlers/pgsql.helper.in b/handlers/pgsql.helper.in index ff1cfd4..82e6b48 100644 --- a/handlers/pgsql.helper.in +++ b/handlers/pgsql.helper.in @@ -67,6 +67,21 @@ pgsql_wizard() {        pgsql_compress="compress = no"     fi +   # pg_dump format, defaults to plain, custom is recommended by PostgreSQL +   menuBox "$pgsql_title" "Choose a pg_dump format:" \ +      plain "Default plain-text sql script, use with psql." \ +      tar "More flexible than the plain, use with pg_restore." \ +      custom "The most flexible format, use with pg_restore." +   if [ $? = 0 ]; then +      result="$REPLY" +      case "$result" in +         "tar") pgsql_format="format = tar";; +         "custom") pgsql_format="format = custom";; +         *) pgsql_format="format = plain";; +      esac +   fi +          +     # write config file     get_next_filename $configdirectory/20.pgsql     cat >> $next_filename <<EOF @@ -97,7 +112,22 @@ $pgsql_databases  # if yes, compress the pg_dump/pg_dumpall output.  
$pgsql_compress +# format = < plain | tar | custom > (default = plain) +# plain -  Output a plain-text SQL script file with the extension .sql. +#          When dumping all databases, a single file is created via pg_dumpall. +# tar -    Output a tar archive suitable for input into pg_restore. More  +#          flexible than plain and can be manipulated by standard Unix tools  +#          such as tar. Creates a globals.sql file and an archive per database. +# custom - Output a custom PostgreSQL pg_restore archive. This is the most +#          flexible format allowing selective import and reordering of database +#          objects at the time the database is restored via pg_restore. This +#          option creates a globals.sql file containing the cluster role and +#          other information dumped by pg_dumpall -g and a pg_restore file +#          per selected database. See the pg_dump and pg_restore man pages. +$pgsql_format +  ### You can also set the following variables in backupninja.conf: +# PSQL: psql path (default: /usr/bin/psql)  # PGSQLDUMP: pg_dump path (default: /usr/bin/pg_dump)  # PGSQLDUMPALL: pg_dumpall path (default: /usr/bin/pg_dumpall)  # PGSQLUSER: user running PostgreSQL (default: postgres) diff --git a/handlers/pgsql.in b/handlers/pgsql.in index 0b7badf..ff71ebc 100644 --- a/handlers/pgsql.in +++ b/handlers/pgsql.in @@ -8,6 +8,8 @@ getconf backupdir /var/backups/postgres  getconf databases all  getconf compress yes  getconf vsname +# format maps to pg_dump --format= option, old/default was plain +getconf format plain  localhost=`hostname` @@ -35,17 +37,31 @@ fi  # Make sure that the system to backup has the needed executables  if [ $usevserver = yes ]; then     debug "Examining vserver '$vsname'." -   if [ "$databases" == "all" ]; then +   if [ "$databases" == "all" ] && [ "$format" = "plain" ]; then        [ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMPALL`" ] || \           fatal "Can't find $PGSQLDUMPALL in vserver $vsname." 
+   elif [ "$format" != "plain" ]; then +      [ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMPALL`" ] || \ +         fatal "Can't find $PGSQLDUMPALL in vserver $vsname." +      [ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMP`" ] || \ +         fatal "Can't find $PGSQLDUMP in vserver $vsname." +      [ -x "$vroot`$VSERVER $vsname exec which $PSQL`" ] || \ +         fatal "Can't find $PSQL in vserver $vsname."     else        [ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMP`" ] || \           fatal "Can't find $PGSQLDUMP in vserver $vsname."     fi  else -   if [ "$databases" == "all" ]; then +   if [ "$databases" == "all" ] && [ "$format" = "plain" ]; then +      [ -x "`which $PGSQLDUMPALL`" ] || \ +         fatal "Can't find $PGSQLDUMPALL." +   elif [ "$format" != "plain" ]; then        [ -x "`which $PGSQLDUMPALL`" ] || \           fatal "Can't find $PGSQLDUMPALL." +      [ -x "`which $PGSQLDUMP`" ] || \ +         fatal "Can't find $PGSQLDUMP." +      [ -x "`which $PSQL`" ] || \ +         fatal "Can't find $PSQL."     else        [ -x "`which $PGSQLDUMP`" ] || \           fatal "Can't find $PGSQLDUMP." @@ -71,17 +87,41 @@ chown $pguid $vroot$backupdir  debug "chmod 700 $vroot$backupdir"  chmod 700 $vroot$backupdir + +# If we are using the custom (best) or tar pg_dump format, and +# dumping "all" databases, we will substitute "all" for a list +# of all non-template databases to avoid the use of pg_dumpall. 
+dumpglobals="no" +if [ "$databases" = "all" ] && [ "$format" != "plain" ]; then +   cmdprefix="" +   if [ "$usevserver" = "yes" ]; then +      cmdprefix="$VSERVER $vsname exec " +   fi +   execstr="${cmdprefix} su - $PGSQLUSER -c 'psql -AtU $PGSQLUSER -c \"SELECT datname FROM pg_database WHERE NOT datistemplate\"'" +   debug "$execstr" +   dblist="" +   for db in $(eval $execstr 2>&1); do +      dblist="$dblist $db" +   done +   if [ "$dblist" != "" ]; then +      databases="$dblist" +   fi +   # Dump globals (pg_dumpall -g) for roles and tablespaces +   dumpglobals="yes" +fi + +  # if $databases = all, use pg_dumpall  if [ "$databases" == "all" ]; then     if [ $usevserver = yes ]; then        if [ "$compress" == "yes" ]; then -         execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${vsname}.sql.gz'\"" +         execstr="$VSERVER $vsname exec su - $PGSQLUSER -s /bin/bash -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${vsname}.sql.gz'\""        else           execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMPALL > '$backupdir/${vsname}.sql'\""        fi     else        if [ "$compress" == "yes" ]; then -         execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${localhost}-all.sql.gz'\"" +         execstr="su - $PGSQLUSER -s /bin/bash -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${localhost}-all.sql.gz'\""        else           execstr="su - $PGSQLUSER -c \"$PGSQLDUMPALL > '$backupdir/${localhost}-all.sql'\""        fi @@ -101,20 +141,58 @@ if [ "$databases" == "all" ]; then  # else use pg_dump on each specified database  else -   for db in $databases; do +   # If we're not doing plain format, database=all may now be database=list +   # so we track the database=all selection in dumpglobals which tells us +   # to also dump the roles and tablespaces via pg_dumpall -g +   if [ "$dumpglobals" = 
"yes" ]; then +      globalscmd="" +      if [ "$compress" == "yes" ]; then +         globalscmd="set -o pipefail ; $PGSQLDUMPALL -g | $GZIP $GZIP_OPTS > '$backupdir/globals.sql.gz'" +      else +         globalscmd="$PGSQLDUMPALL -g > '$backupdir/globals.sql'" +      fi        if [ $usevserver = yes ]; then -         if [ "$compress" == "yes" ]; then -            execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP $GZIP_OPTS > '$backupdir/${db}.sql.gz'\"" -         else -            execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMP $db | > '$backupdir/${db}.sql'\"" -         fi +         execstr="$VSERVER $vsname exec su - $PGSQLUSER -s /bin/bash -c \"$globalscmd\""        else -         if [ "$compress" == "yes" ]; then -            execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP $GZIP_OPTS > '$backupdir/${db}.sql.gz'\"" +         execstr="su - $PGSQLUSER -s /bin/bash -c \"$globalscmd\"" +      fi +      debug "$execstr" +      if [ ! $test ]; then +         output=`eval $execstr 2>&1` +         code=$? +         if [ "$code" == "0" ]; then +            debug $output +            info "Successfully finished pgsql globals (roles and tablespaces) dump"           else -            execstr="su - $PGSQLUSER -c \"$PGSQLDUMP $db > '$backupdir/${db}.sql'\"" +            warning $output +            warning "Failed to dump pgsql globals (roles and tablespaces)"           fi        fi +   fi +   for db in $databases; do +      dumpext="sql" +      if [ "$format" != "plain" ]; then +         dumpext="pg_dump" +      fi +      # To better support the backupninja global GZIP and rsync-friendly GZIP_OPTS +      # the custom archive format is told to disable compression. The plain format +      # is uncompressed by default and the tar format doesn't support pg_dump compression. 
+      disablecustomcompress="" +      if [ "$format" = "custom" ]; then +         disablecustomcompress="--compress=0" +      fi +      dumpcmd="" +      globalscmd="" +      if [ "$compress" == "yes" ]; then +         dumpcmd="set -o pipefail ; $PGSQLDUMP --format=$format ${disablecustomcompress} $db | $GZIP $GZIP_OPTS > '$backupdir/${db}.${dumpext}.gz'" +      else +         dumpcmd="$PGSQLDUMP --format=$format ${disablecustomcompress} $db > '$backupdir/${db}.${dumpext}'" +      fi +      if [ $usevserver = yes ]; then +         execstr="$VSERVER $vsname exec su - $PGSQLUSER -s /bin/bash -c \"$dumpcmd\"" +      else +         execstr="su - $PGSQLUSER -s /bin/bash -c \"$dumpcmd\"" +      fi        debug "$execstr"        if [ ! $test ]; then           output=`eval $execstr 2>&1` diff --git a/handlers/rdiff.in b/handlers/rdiff.in index 60386fa..e391edd 100644 --- a/handlers/rdiff.in +++ b/handlers/rdiff.in @@ -219,7 +219,10 @@ SAVEIFS=$IFS  IFS=$(echo -en "\n\b")  for i in $exclude; do     str="${i//__star__/*}" -   execstr="${execstr}--exclude '$str' " +   case "$str" in +      @*) execstr="${execstr}--exclude-globbing-filelist '${str#@}' " ;; +      *) execstr="${execstr}--exclude '$str' " ;; +   esac  done  IFS=$SAVEIFS  # includes @@ -228,7 +231,10 @@ IFS=$(echo -en "\n\b")  for i in $include; do     [ "$i" != "/" ] || fatal "Sorry, you cannot use 'include = /'"     str="${i//__star__/*}" -   execstr="${execstr}--include '$str' " +   case "$str" in +   @*) execstr="${execstr}--include-globbing-filelist '${str#@}' " ;; +   *) execstr="${execstr}--include '$str' " ;; +   esac  done  IFS=$SAVEIFS diff --git a/handlers/rsync.in b/handlers/rsync.in index fea7e7b..7b06c24 100644 --- a/handlers/rsync.in +++ b/handlers/rsync.in @@ -384,14 +384,14 @@ function rotate_long {    for rottype in daily weekly monthly; do      seconds=$((seconds_${rottype})) -      dir="$backuproot/$rottype" -    metadata="$backuproot/metadata/$rottype.1" -    mkdir -p $metadata +    
metadata="$backuproot/metadata/$rottype" + +    mkdir -p $metadata.1      if [ ! -d $dir.1 ]; then        echo "Debug: $dir.1 does not exist, skipping."        continue 1 -    elif [ ! -f $metadata/created ] && [ ! -f $metadata/rotated ]; then +    elif [ ! -f $metadata.1/created ] && [ ! -f $metadata.1/rotated ]; then        echo "Warning: metadata does not exist for $dir.1. This backup may be only partially completed. Skipping rotation."        continue 1      fi @@ -401,10 +401,10 @@ function rotate_long {      [ "$oldest" == "" ] && oldest=0      for (( i=$oldest; i > 0; i-- )); do        if [ -d $dir.$i ]; then -        if [ -f $metadata/created ]; then -          created=`tail -1 $metadata/created` -        elif [ -f $metadata/rotated ]; then -          created=`tail -1 $metadata/rotated` +        if [ -f $metadata.$i/created ]; then +          created=`tail -1 $metadata.$i/created` +        elif [ -f $metadata.$i/rotated ]; then +          created=`tail -1 $metadata.$i/rotated`          else            created=0          fi @@ -414,8 +414,8 @@ function rotate_long {            if [ ! -d $dir.$next ]; then              echo "Debug: $rottype.$i --> $rottype.$next"              $nice mv $dir.$i $dir.$next -            mkdir -p $backuproot/metadata/$rottype.$next -            date +%c%n%s > $backuproot/metadata/$rottype.$next/rotated +            mkdir -p $metadata.$next +            date +%c%n%s > $metadata.$next/rotated            else              echo "Debug: skipping rotation of $dir.$i because $dir.$next already exists."            fi @@ -485,14 +485,14 @@ function rotate_long_remote {    for rottype in daily weekly monthly; do      seconds=\$((seconds_\${rottype})) -      dir="$backuproot/\$rottype" -    metadata="$backuproot/metadata/\$rottype.1" -    mkdir -p \$metadata +    metadata="$backuproot/metadata/\$rottype" + +    mkdir -p \$metadata.1      if [ ! -d \$dir.1 ]; then        echo "Debug: \$dir.1 does not exist, skipping."        
continue 1 -    elif [ ! -f \$metadata/created ] && [ ! -f \$metadata/rotated ]; then +    elif [ ! -f \$metadata.1/created ] && [ ! -f \$metadata.1/rotated ]; then        echo "Warning: metadata does not exist for \$dir.1. This backup may be only partially completed. Skipping rotation."        continue 1      fi @@ -502,10 +502,10 @@ function rotate_long_remote {      [ "\$oldest" == "" ] && oldest=0      for (( i=\$oldest; i > 0; i-- )); do        if [ -d \$dir.\$i ]; then -        if [ -f \$metadata/created ]; then -          created=\`tail -1 \$metadata/created\` -        elif [ -f \$metadata/rotated ]; then -          created=\`tail -1 \$metadata/rotated\` +        if [ -f \$metadata.\$i/created ]; then +          created=\`tail -1 \$metadata.\$i/created\` +        elif [ -f \$metadata.\$i/rotated ]; then +          created=\`tail -1 \$metadata.\$i/rotated\`          else            created=0          fi @@ -515,8 +515,8 @@ function rotate_long_remote {            if [ ! -d \$dir.\$next ]; then              echo "Debug: \$rottype.\$i --> \$rottype.\$next"              $nice mv \$dir.\$i \$dir.\$next -            mkdir -p $backuproot/metadata/\$rottype.\$next -            date +%c%n%s > $backuproot/metadata/\$rottype.\$next/rotated +            mkdir -p \$metadata.\$next +            date +%c%n%s > \$metadata.\$next/rotated            else              echo "Debug: skipping rotation of \$dir.\$i because \$dir.\$next already exists."            fi @@ -528,7 +528,7 @@ function rotate_long_remote {    done    max=\$((keepdaily+1)) -  if [ \$keepweekly -gt 0 -a -d $backuproot/daily.\$max -a ! -d \$backuproot/weekly.1 ]; then +  if [ \$keepweekly -gt 0 -a -d $backuproot/daily.\$max -a ! -d $backuproot/weekly.1 ]; then      echo "Debug: daily.\$max --> weekly.1"      $nice mv $backuproot/daily.\$max $backuproot/weekly.1      mkdir -p $backuproot/metadata/weekly.1 | 
