This commit is contained in:
Hao Jiang 2024-03-19 17:47:46 -06:00
Родитель e4f3ae7826
Коммит 9a8c195637
173 изменённых файлов: 0 добавлений и 14612 удалений

Просмотреть файл

@ -1,12 +0,0 @@
# Contributing
Looking to contribute something to this project? That is great, we always appreciate pull requests! Here's how you can help:
1. Fork the project to your account.
2. Clone the fork (`git clone https://github.com/[username]/backup-utils.git`).
3. Create a new feature branch (`git checkout -b my-feature-branch`).
4. Add and then commit your changes (`git commit -am "Add a new backup endpoint."`).
5. Push your feature branch to GitHub.com (`git push -u origin my-feature-branch`).
6. Open a [Pull Request](https://github.com/github/backup-utils/compare/) and wait for our feedback.
Have a look at the [styleguide](https://github.com/github/backup-utils/tree/master/STYLEGUIDE.md) to make sure your code style is consistent with the code in this repository.

Просмотреть файл

@ -1,76 +0,0 @@
# Multi stage build for backup-utils
# Build layer is for compiling rsync from source
# Runtime layer is for running backup-utils
# https://docs.docker.com/develop/develop-images/multistage-build/

# Build layer
FROM ubuntu:focal AS build

# Install build dependencies
RUN apt-get update && apt-get install --no-install-recommends -y \
    gcc \
    g++ \
    gawk \
    autoconf \
    make \
    automake \
    python3-cmarkgfm \
    acl \
    libacl1-dev \
    attr \
    libattr1-dev \
    libxxhash-dev \
    libzstd-dev \
    liblz4-dev \
    libssl-dev \
    git \
    jq \
    bc \
    curl \
    tar \
    gzip \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*

# Download rsync source from https://github.com/WayneD/rsync/archive/refs/tags/[TAG].tar.gz pinned to specified tag
ARG RSYNC_TAG=v3.2.7
# -f makes curl fail on HTTP errors instead of silently saving an error page
# as the tarball (which would only surface later as a confusing tar failure).
RUN curl -fL https://github.com/WayneD/rsync/archive/refs/tags/${RSYNC_TAG}.tar.gz -o ${RSYNC_TAG}.tar.gz \
    && mkdir -p /rsync-${RSYNC_TAG} \
    && tar -xzf ${RSYNC_TAG}.tar.gz -C /rsync-${RSYNC_TAG} --strip-components=1

# Change to the working directory of the rsync source
WORKDIR /rsync-${RSYNC_TAG}
# Configure, build and install rsync into /usr/local
RUN ./configure && make && make install

# Reset working directory
WORKDIR /

# Runtime layer
FROM ubuntu:focal AS runtime

# Install runtime dependencies - bash, git, OpenSSH 5.6 or newer, and jq v1.5 or newer.
RUN apt-get update && apt-get install --no-install-recommends -y \
    bash \
    git \
    openssh-client \
    jq \
    bc \
    moreutils \
    gawk \
    ca-certificates \
    xxhash \
    && rm -rf /var/lib/apt/lists/*

# Copy rsync from build layer
COPY --from=build /usr/local/bin/rsync /usr/local/bin/rsync

# Copy backup-utils from repository into /backup-utils
COPY ./ /backup-utils/
WORKDIR /backup-utils
RUN chmod +x /backup-utils/share/github-backup-utils/ghe-docker-init
ENTRYPOINT ["/backup-utils/share/github-backup-utils/ghe-docker-init"]
CMD ["ghe-host-check"]

Просмотреть файл

@ -1,20 +0,0 @@
# Lightweight single-stage Alpine image for running backup-utils.
# NOTE(review): alpine:latest is an unpinned tag; builds are not reproducible
# across base-image updates -- consider pinning to a specific release.
FROM alpine:latest

# Runtime dependencies. --no-cache fetches a fresh package index without
# leaving it in the image layer, so the separate --update flag was redundant
# and has been dropped.
RUN apk --no-cache add \
    tar \
    rsync \
    ca-certificates \
    openssh \
    git \
    bash \
    gawk \
    procps \
    coreutils

# Copy backup-utils from repository into /backup-utils and make the
# container entrypoint executable.
COPY ./ /backup-utils/
WORKDIR /backup-utils
RUN chmod +x /backup-utils/share/github-backup-utils/ghe-docker-init
ENTRYPOINT ["/backup-utils/share/github-backup-utils/ghe-docker-init"]
CMD ["ghe-host-check"]

20
LICENSE
Просмотреть файл

@ -1,20 +0,0 @@
Copyright (c) 2014 GitHub Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Просмотреть файл

@ -1,27 +0,0 @@
# Use a predictable POSIX shell for all recipes.
SHELL = /bin/sh

# Run the test suite (prints environment info first).
test: info
	@echo Running tests
	@script/test

# Show basic information about the shell and rsync in use.
info:
	@echo This is github/backup-utils
	@echo shell is $(shell ls -l $(SHELL) | sed 's@.*/bin/sh@/bin/sh@')
	@rsync --version | head -1
	@echo

# Build the distribution tarball.
dist:
	@script/package-tarball

# Build the Debian package.
deb:
	@script/package-deb

# Remove build artifacts.
clean:
	rm -rf dist

# List pull requests that need to be merged into stable
# (helpful for the release notes)
pending-prs:
	@git log stable...master | grep "Merge pull request"

# None of these targets produce a file named after themselves.
# ('deb' was previously missing here, so a file named "deb" would have
# silently broken 'make deb'.)
.PHONY: test info dist deb clean pending-prs

Просмотреть файл

@ -1,250 +0,0 @@
## Bash Style Guide
If you've not done much Bash development before you may find these debugging tips useful: http://wiki.bash-hackers.org/scripting/debuggingtips.
---
##### Scripts must start with `#!/usr/bin/env bash`
---
##### Use `set -e`
If the return value of a command can be ignored, suffix it with `|| true`:
```bash
set -e
command_that_might_fail || true
command_that_should_not_fail
```
Note that ignoring an exit status with `|| true` is not a good practice though. Generally speaking, it's better to handle the error.
---
##### Avoid manually checking exit status with `$?`
Rely on `set -e` instead:
```bash
cmd
if [ $? -eq 0 ]; then
echo worked
fi
```
should be written as:
```bash
set -e
if cmd; then
echo worked
fi
```
---
##### Include a usage, description and optional examples
Use this format:
```bash
#!/usr/bin/env bash
#/ Usage: ghe-this-is-my-script [options] <required_arg>
#/
#/ This is a brief description of the script's purpose.
#/
#/ OPTIONS:
#/ -h | --help Show this message.
#/ -l | --longopt <required_arg> An option.
#/ -c <required_arg> Another option.
#/
#/ EXAMPLES: (optional section but nice to have when not trivial)
#/
#/ This will do foo and bar:
#/ $ ghe-this-is-my-script --longopt foobar -c 2
#/
set -e
```
If there are no options or required arguments, the `OPTIONS` section can be ignored.
---
##### Customer-facing scripts must accept both -h and --help arguments
They should also print the usage information and exit 2.
For example:
```bash
#!/usr/bin/env bash
#/ Usage: ghe-this-is-my-script [options] <required_arg>
#/
#/ This is a brief description of the script's purpose.
set -e
if [ "$1" = "--help" -o "$1" = "-h" ]; then
grep '^#/' <"$0" | cut -c 4-
exit 2
fi
```
---
##### Avoid Bash arrays
Main issues:
* Portability
* Important bugs in Bash versions < 4.3
---
##### Use `test` or `[` whenever possible
```bash
test -f /etc/passwd
test -f /etc/passwd -a -f /etc/group
if [ "string" = "string" ]; then
true
fi
```
---
##### Scripts may use `[[` for advanced bash features
```bash
if [[ "$(hostname)" = *.iad.github.net ]]; then
true
fi
```
---
##### Scripts may use Bash for loops
Preferred:
```bash
for i in $(seq 0 9); do
  echo "$i"
done
```
or:
```bash
for ((n=0; n<10; n++)); do
  echo "$n"
done
```
---
##### Use `$[x+y*z]` for mathematical expressions
```bash
local n=1
let n++
n=$[n+1] # preferred
n=$[$n+1]
n=$((n+1))
n=$(($n+1))
```
---
##### Use variables sparingly
Short paths and other constants should be repeated liberally throughout code since they
can be search/replaced easily if they ever change.
```bash
DATA_DB_PATH=/data/user/db
mkdir -p $DATA_DB_PATH
rsync $DATA_DB_PATH remote:$DATA_DB_PATH
```
versus the much more readable:
```bash
mkdir -p /data/user/db
rsync /data/user/db remote:/data/user/db
```
---
##### Use lowercase and uppercase variable names
Use lowercase variables for locals and internal variables, and uppercase for variables inherited or exported via the environment
```bash
#!/usr/bin/env bash
#/ Usage: [DEBUG=0] process_repo <nwo>
nwo=$1
[ -n $DEBUG ] && echo "** processing $nwo" >&2
export GIT_DIR=/data/repos/$nwo.git
git rev-list
```
---
##### Use `${var}` for interpolation only when required
```bash
greeting=hello
echo $greeting
echo ${greeting}world
```
---
##### Use functions sparingly, opting for small/simple/sequential scripts instead whenever possible
When defining functions, use the following style:
```bash
my_function() {
local arg1=$1
[ -n $arg1 ] || return
...
}
```
---
##### Use `<<heredocs` when dealing with multi-line strings
- `<<eof` and `<< eof` will allow interpolation
- `<<"eof"` and `<<'eof'` will disallow interpolation
- `<<-eof` and `<<-"eof"` will strip off leading tabs first
```bash
cat <<"eof" | ssh $remote -- bash
foo=bar
echo $foo # interpolated on remote side after ssh
eof
```
```bash
bar=baz
cat <<eof | ssh $remote -- bash
echo $bar > /etc/foo # interpolated before ssh
chmod 0600 /etc/foo
eof
```
---
##### Quote variables that could reasonably have a space now or in the future
```bash
if [ ! -z "$packages" ]; then
true
fi
```
---
##### Use two space indentation
---
##### Scripts should not produce errors or warnings when checked with ShellCheck
Use inline comments to disable specific tests, and explain why the test has been disabled.
```bash
hexToAscii() {
# shellcheck disable=SC2059 # $1 needs to be interpreted as a formatted string
printf "\x$1"
}
```
### Testing
See [the style guide](https://github.com/github/backup-utils/blob/master/test/STYLEGUIDE.md)

Просмотреть файл

@ -1,123 +0,0 @@
# GitHub Enterprise Server backup configuration file
# The hostname of the GitHub Enterprise Server appliance to back up. The host
# must be reachable via SSH from the backup host.
GHE_HOSTNAME="github.example.com"
# Path to where backup data is stored. By default this is the "data"
# directory next to this file but can be set to an absolute path
# elsewhere for backing up to a separate partition / mount point.
GHE_DATA_DIR="data"
# The number of backup snapshots to retain. Old snapshots are pruned after each
# successful ghe-backup run. This option should be tuned based on the frequency
# of scheduled backup runs. If backups are scheduled hourly, snapshots will be
# available for the past N hours; if backups are scheduled daily, snapshots will
# be available for the past N days ...
GHE_NUM_SNAPSHOTS=10
# Pruning snapshots can be scheduled outside of the backup process.
# If set to 'yes', snapshots will not be pruned by ghe-backup.
# Instead, ghe-pruning-snapshots will need to be invoked separately via cron
#GHE_PRUNING_SCHEDULED=yes
# If --incremental is used to generate incremental MySQL backups with ghe-backup,
# then you need to specify how many cycles of full and incremental backups will be
# performed before the next full backup is created.
# For example, if `GHE_INCREMENTAL_MAX_BACKUPS` is set to 14, backup-utils will
# run 1 full backup and then 13 incremental backups before performing another full backup on the next cycle.
#GHE_INCREMENTAL_MAX_BACKUPS=14
# If GHE_SKIP_CHECKS is set to true (or if --skip-checks is used with ghe-backup) then ghe-host-check
# disk space validation and software version checks on the backup-host will be disabled.
#GHE_SKIP_CHECKS=false
# The hostname of the GitHub appliance to restore. If you've set up a separate
# GitHub appliance to act as a standby for recovery, specify its IP or hostname
# here. The host to restore to may also be specified directly when running
# ghe-restore so use of this variable isn't strictly required.
#
#GHE_RESTORE_HOST="github-standby.example.com"
# If set to 'yes', ghe-restore will omit the restore of audit logs.
#
#GHE_RESTORE_SKIP_AUDIT_LOGS=no
# When verbose output is enabled with `-v`, it's written to stdout by default. If
# you'd prefer it to be written to a separate file, set this option.
#
#GHE_VERBOSE_LOG="/var/log/backup-verbose.log"
# Any extra options passed to the SSH command.
# In a single instance environment, nothing is required by default.
# In a clustering environment, "-i abs-path-to-ssh-private-key" is required.
#
#GHE_EXTRA_SSH_OPTS=""
# Any extra options passed to the rsync command. Nothing required by default.
#
#GHE_EXTRA_RSYNC_OPTS=""
# If set to 'yes', rsync will be set to use compression during backups and restores transfers. Defaults to 'no'.
#
#GHE_RSYNC_COMPRESSION_ENABLED=yes
# If enabled and set to 'no', rsync warning message during backups will be suppressed.
#RSYNC_WARNING=no
# If set to 'yes', logging output will be colorized.
#
#OUTPUT_COLOR=no
# If set to 'no', GHE_DATA_DIR will not be created automatically
# and restore/backup will exit 8
#
#GHE_CREATE_DATA_DIR=yes
# If set to 'yes', git fsck will run on the repositories
# and print some additional info.
#
# WARNING: do not enable this, only useful for debugging/development
#GHE_BACKUP_FSCK=no
# Cadence of MSSQL backups
# <full>,<differential>,<transactionlog> all in minutes
# e.g.
# - Full backup every week (10080 minutes)
# - Differential backup every day (1440 minutes)
# - Transactionlog backup every 15 minutes
#
#GHE_MSSQL_BACKUP_CADENCE=10080,1440,15
# If set to 'yes', ghe-backup jobs will run in parallel. Defaults to 'no'.
#
#GHE_PARALLEL_ENABLED=yes
# Sets the maximum number of jobs to run in parallel. Defaults to the number
# of available processing units on the machine.
#
#GHE_PARALLEL_MAX_JOBS=2
# Sets the maximum number of rsync jobs to run in parallel. Defaults to the
# configured GHE_PARALLEL_MAX_JOBS, or the number of available processing
# units on the machine.
#
# GHE_PARALLEL_RSYNC_MAX_JOBS=3
# When jobs are running in parallel wait as needed to avoid starting new jobs
# when the system's load average is not below the specified percentage. Defaults to
# unrestricted.
#
#GHE_PARALLEL_MAX_LOAD=50
# When running an external mysql database, run this script to trigger a MySQL backup
# rather than attempting to backup via backup-utils directly.
#EXTERNAL_DATABASE_BACKUP_SCRIPT="/bin/false"
# When running an external mysql database, run this script to trigger a MySQL restore
# rather than attempting to backup via backup-utils directly.
#EXTERNAL_DATABASE_RESTORE_SCRIPT="/bin/false"
# If set to 'yes', Pages data will be included in backup and restore. Defaults to 'yes'
#GHE_BACKUP_PAGES=no

Просмотреть файл

@ -1,406 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup [-hv] [--version]
#/
#/ Take snapshots of all GitHub Enterprise data, including Git repository data,
#/ the MySQL database, instance settings, GitHub Pages data, etc.
#/
#/ OPTIONS:
#/ -v | --verbose Enable verbose output.
#/ -h | --help Show this message.
#/ --version Display version information.
#/ -i | --incremental Incremental backup
#/ --skip-checks Skip storage/sw version checks
#/
set -e

# Parse arguments. Flags are exported into the environment so the sourced
# config script and the ghe-backup-* helpers invoked later can see them.
# Parsing stops at the first non-flag argument; unknown flags are fatal.
while true; do
  case "$1" in
    -h|--help)
      export GHE_SHOW_HELP=true
      shift
      ;;
    --version)
      export GHE_SHOW_VERSION=true
      shift
      ;;
    -v|--verbose)
      export GHE_VERBOSE=true
      shift
      ;;
    -i|--incremental)
      export GHE_INCREMENTAL=true
      shift
      ;;
    --skip-checks)
      export GHE_SKIP_CHECKS=true
      shift
      ;;
    -*)
      echo "Error: invalid argument: '$1'" 1>&2
      exit 1
      ;;
    *)
      break
      ;;
  esac
done

# Identify this invocation to shared helpers (read by ghe-host-check).
export CALLING_SCRIPT="ghe-backup"

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/../share/github-backup-utils/ghe-backup-config"

# Check to make sure moreutils parallel is installed and working properly
ghe_parallel_check

# Used to record failed backup steps
failures=
failures_file="$(mktemp -t backup-utils-backup-failures-XXXXXX)"

# CPU and IO throttling to keep backups from thrashing around.
export GHE_NICE=${GHE_NICE:-"nice -n 19"}
export GHE_IONICE=${GHE_IONICE:-"ionice -c 3"}

# Create the timestamped snapshot directory where files for this run will live,
# change into it, and mark the snapshot as incomplete by touching the
# 'incomplete' file. If the backup succeeds, this file will be removed
# signifying that the snapshot is complete.
mkdir -p "$GHE_SNAPSHOT_DIR"
cd "$GHE_SNAPSHOT_DIR"
touch "incomplete"

# Exit early if the snapshot filesystem doesn't support hard links, symlinks and
# if rsync doesn't support hardlinking of dangling symlinks.
# (This probe trap is replaced by the main 'cleanup' trap further below.)
trap 'rm -rf src dest1 dest2' EXIT
mkdir -p src
touch src/testfile
# Symlink probe: deliberately creates a dangling symlink, as repositories do.
if ! ln -s /data/does/not/exist/hooks/ src/ >/dev/null 2>&1; then
  log_error "Error: the filesystem containing $GHE_DATA_DIR does not support symbolic links. \nGit repositories contain symbolic links that need to be preserved during a backup." 1>&2
  exit 1
fi
# rsync probe: the second copy uses --link-dest against the first, exercising
# the hard-linked snapshot deduplication the backup strategy relies on.
if ! output=$(rsync -a src/ dest1 2>&1 && rsync -av src/ --link-dest=../dest1 dest2 2>&1); then
  log_error "Error: rsync encountered an error that could indicate a problem with permissions,\n hard links, symbolic links, or another issue that may affect backups." 1>&2
  echo "$output"
  exit 1
fi
# Hard-link probe: both copies of the test file must share one inode.
if [ "$(stat -c %i dest1/testfile)" != "$(stat -c %i dest2/testfile)" ]; then
  log_error "Error: the filesystem containing $GHE_DATA_DIR does not support hard links.\n Backup Utilities use hard links to store backup data efficiently." 1>&2
  exit 1
fi
rm -rf src dest1 dest2
# To prevent multiple backup runs happening at the same time, an in-progress
# file containing "<snapshot-timestamp> <pid>" acts as a lock for this run.
#
# The handler below is installed as the EXIT trap: it releases the lock only
# when this process still owns it, removes temporary failure/progress state,
# tears down SSH multiplexing, and records the benchmark end time. The same
# trap is also responsible for post-run appliance cleanup (e.g. maintenance
# mode, per the original design notes).
cleanup () {
  # Release the lock only if it still names our snapshot and our PID --
  # otherwise it belongs to a different backup process and must be left alone.
  if [ -f ../in-progress ]; then
    read -r owner_snapshot owner_pid _ < ../in-progress || true
    if [ "$owner_snapshot" = "$GHE_SNAPSHOT_TIMESTAMP" ] && [ "$owner_pid" = "$$" ]; then
      unlink ../in-progress
    fi
  fi

  # Scratch state: failure log, secondary lock file, and progress files.
  rm -rf "$failures_file"
  rm -f "${GHE_DATA_DIR}/in-progress-backup"
  rm -rf /tmp/backup-utils-progress/*

  # Cleanup SSH multiplexing
  ghe-ssh --clean
  bm_end "$(basename $0)"
}
# Setup exit traps. 'cleanup' replaces the earlier filesystem-probe trap;
# from here on, exiting for any reason releases the lock files.
trap 'cleanup' EXIT
trap 'exit $?' INT # ^C always terminate

# Check to see if there is a running restore
ghe_restore_check

# Check to see if there is a running backup.
# A symlink here means the lock was left by a previous major version of
# ghe-backup whose liveness we cannot verify, so ask the operator to clean up.
if [ -h ../in-progress ]; then
  log_error "Detected a backup already in progress from a previous version of ghe-backup. \nIf there is no backup in progress anymore, please remove \nthe $GHE_DATA_DIR/in-progress file." >&2
  exit 1
fi

# A regular in-progress file holds "<snapshot-timestamp> <pid>"; if that PID
# is no longer running the lock is stale and safe to remove.
if [ -f ../in-progress ]; then
  progress=$(cat ../in-progress)
  snapshot=$(echo "$progress" | cut -d ' ' -f 1)
  pid=$(echo "$progress" | cut -d ' ' -f 2)
  if ! ps -p "$pid" >/dev/null 2>&1; then
    # We can safely remove in-progress, ghe-prune-snapshots
    # will clean up the failed backup.
    unlink ../in-progress
  else
    log_error "Error: A backup of $GHE_HOSTNAME may still be running on PID $pid. \nIf PID $pid is not a process related to the backup utilities, please remove \nthe $GHE_DATA_DIR/in-progress file and try again." 1>&2
    exit 1
  fi
fi

# Perform a host connection check and establish the remote appliance version.
# The version is available in the GHE_REMOTE_VERSION variable and also written
# to a version file in the snapshot directory itself.
# ghe_remote_version_required should be run before any other instances of ghe-ssh
# to ensure that there are no problems with host key verification.
ghe_remote_version_required
echo "$GHE_REMOTE_VERSION" > version

# Setup progress tracking. State lives under /tmp/backup-utils-progress so
# ghe-backup-progress can display it from another terminal.
init-progress
export PROGRESS_TOTAL=14 # Minimum number of steps in backup is 14
echo "$PROGRESS_TOTAL" > /tmp/backup-utils-progress/total
export PROGRESS_TYPE="Backup"
echo "$PROGRESS_TYPE" > /tmp/backup-utils-progress/type
export PROGRESS=0 # Used to track progress of backup
echo "$PROGRESS" > /tmp/backup-utils-progress/progress

# Count the optional steps that apply to this appliance so the progress total
# is accurate before any work starts.
OPTIONAL_STEPS=0
# Backup actions+mssql (two extra steps when Actions is enabled)
if ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --true app.actions.enabled'; then
  OPTIONAL_STEPS=$((OPTIONAL_STEPS + 2))
fi
# Backup fsck
if [ "$GHE_BACKUP_FSCK" = "yes" ]; then
  OPTIONAL_STEPS=$((OPTIONAL_STEPS + 1))
fi
# Backup minio
if ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --true app.minio.enabled'; then
  OPTIONAL_STEPS=$((OPTIONAL_STEPS + 1))
fi
# Backup pages (on unless explicitly disabled)
if [ "$GHE_BACKUP_PAGES" != "no" ]; then
  OPTIONAL_STEPS=$((OPTIONAL_STEPS + 1))
fi
PROGRESS_TOTAL=$((OPTIONAL_STEPS + PROGRESS_TOTAL)) # Minimum number of steps in backup is 14
echo "$PROGRESS_TOTAL" > /tmp/backup-utils-progress/total
# check that incremental settings are valid if set
is_inc=$(is_incremental_backup_feature_on)
if [ "$is_inc" = true ]; then
  # Incremental backups require GHES 3.10 or newer. The previous test
  # rejected any GHE_VERSION_MINOR < 10 regardless of the major version,
  # which would wrongly fail on e.g. 4.5; the minor-version floor must only
  # apply within the 3.x series.
  if [ "$GHE_VERSION_MAJOR" -lt 3 ] || { [ "$GHE_VERSION_MAJOR" -eq 3 ] && [ "$GHE_VERSION_MINOR" -lt 10 ]; }; then
    log_error "Can only perform incremental backups on enterprise version 3.10 or higher"
    exit 1
  fi
  # Validate the incremental backup configuration/directories.
  incremental_backup_check
  # If everything is ok, check if we have hit GHE_INCREMENTAL_MAX_BACKUPS, performing pruning actions if necessary
  check_for_incremental_max_backups
  # initialize incremental backup if it hasn't been done yet
  incremental_backup_init
fi
# Take the lock: record "<snapshot-timestamp> <pid>" both next to the snapshot
# and in the data directory so other runs (and ghe-restore) can detect us.
echo "$GHE_SNAPSHOT_TIMESTAMP $$" > ../in-progress
echo "$GHE_SNAPSHOT_TIMESTAMP $$" > "${GHE_DATA_DIR}/in-progress-backup"

bm_start "$(basename $0)"

START_TIME=$(date +%s)
log_info "Starting backup of $GHE_HOSTNAME with backup-utils v$BACKUP_UTILS_VERSION in snapshot $GHE_SNAPSHOT_TIMESTAMP"

if [ -n "$GHE_ALLOW_REPLICA_BACKUP" ]; then
  echo "Warning: backing up a high availability replica may result in inconsistent or unreliable backups."
fi

# Output system information of the backup host
# If /etc/os-release exists, use it to get the OS version
if [ -f /etc/os-release ]; then
  OS_NAME=$(grep '^NAME' /etc/os-release | cut -d'"' -f2)
  VERSION_ID=$(grep '^VERSION_ID' /etc/os-release | cut -d'"' -f2)
  echo "Running on: $OS_NAME $VERSION_ID"
else
  echo "Running on: Unknown OS"
fi

# If nproc command exists, use it to get the number of CPUs
if command -v nproc >/dev/null 2>&1; then
  echo "CPUs: $(nproc)"
else
  echo "CPUs: Unknown"
fi

# If the free command exists, use it to get the memory details
if command -v free >/dev/null 2>&1; then
  echo "Memory $(free -m | grep '^Mem:' | awk '{print "total/used/free+share/buff/cache: " $2 "/" $3 "/" $4 "+" $5 "/" $6 "/" $7}')"
else
  echo "Memory: Unknown"
fi

# Log backup start message in /var/log/syslog on remote instance
ghe_remote_logger "Starting backup from $(hostname) with backup-utils v$BACKUP_UTILS_VERSION in snapshot $GHE_SNAPSHOT_TIMESTAMP ..."

# Determine the backup strategy unless one was forced via the environment.
export GHE_BACKUP_STRATEGY=${GHE_BACKUP_STRATEGY:-$(ghe-backup-strategy)}

# Record the strategy with the snapshot so we will know how to restore.
echo "$GHE_BACKUP_STRATEGY" > strategy

# Create benchmark file
bm_init > /dev/null

# Best-effort: a failure here only warns, it does not mark the backup failed.
ghe-backup-store-version ||
log_warn "Warning: storing backup-utils version remotely failed."

# Sequential steps: each helper appends its name to $failures on error rather
# than aborting, so one failed component doesn't stop the rest of the backup.
log_info "Backing up GitHub secrets ..."
ghe-backup-secrets || failures="$failures secrets"

log_info "Backing up GitHub settings ..."
ghe-backup-settings || failures="$failures settings"

log_info "Backing up SSH authorized keys ..."
bm_start "ghe-export-authorized-keys"
ghe-ssh "$GHE_HOSTNAME" -- 'ghe-export-authorized-keys' > authorized-keys.json ||
failures="$failures authorized-keys"
bm_end "ghe-export-authorized-keys"

log_info "Backing up SSH host keys ..."
bm_start "ghe-export-ssh-host-keys"
ghe-ssh "$GHE_HOSTNAME" -- 'ghe-export-ssh-host-keys' > ssh-host-keys.tar ||
failures="$failures ssh-host-keys"
bm_end "ghe-export-ssh-host-keys"

ghe-backup-mysql || failures="$failures mysql"

# NOTE(review): fd 3 is presumably opened by the sourced ghe-backup-config
# (verbose-log redirection) -- confirm before relying on it.
if ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --true app.actions.enabled'; then
  log_info "Backing up MSSQL databases ..."
  ghe-backup-mssql 1>&3 || failures="$failures mssql"

  log_info "Backing up Actions data ..."
  ghe-backup-actions 1>&3 || failures="$failures actions"
fi

if ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --true app.minio.enabled'; then
  log_info "Backing up Minio data ..."
  ghe-backup-minio 1>&3 || failures="$failures minio"
fi

# The remaining steps are collected as shell snippets so they can either be
# run serially below or handed to moreutils parallel. Failures are recorded
# in $failures_file because subshells cannot update $failures directly.
cmd_title=$(log_info "Backing up Redis database ...")
commands=("
echo \"$cmd_title\"
ghe-backup-redis > redis.rdb || printf %s \"redis \" >> \"$failures_file\"")

cmd_title=$(log_info "Backing up audit log ...")
commands+=("
echo \"$cmd_title\"
ghe-backup-es-audit-log || printf %s \"audit-log \" >> \"$failures_file\"")

cmd_title=$(log_info "Backing up Git repositories ...")
commands+=("
echo \"$cmd_title\"
ghe-backup-repositories || printf %s \"repositories \" >> \"$failures_file\"")

# Pages backups are skipped only if GHE_BACKUP_PAGES is explicitly set to 'no' to guarantee backward compatibility.
# If a customer upgrades backup-utils but keeps the config file from a previous version, Pages backups still work as expected.
if [ "$GHE_BACKUP_PAGES" != "no" ]; then
  cmd_title=$(log_info "Backing up GitHub Pages artifacts ...")
  commands+=("
echo \"$cmd_title\"
ghe-backup-pages || printf %s \"pages \" >> \"$failures_file\"")
fi

cmd_title=$(log_info "Backing up storage data ...")
commands+=("
echo \"$cmd_title\"
ghe-backup-storage || printf %s \"storage \" >> \"$failures_file\"")

cmd_title=$(log_info "Backing up custom Git hooks ...")
commands+=("
echo \"$cmd_title\"
ghe-backup-git-hooks || printf %s \"git-hooks \" >> \"$failures_file\"")

# Elasticsearch indices are only copied directly under the rsync strategy.
if [ "$GHE_BACKUP_STRATEGY" = "rsync" ]; then
  increment-progress-total-count 1
  cmd_title=$(log_info "Backing up Elasticsearch indices ...")
  commands+=("
echo \"$cmd_title\"
ghe-backup-es-rsync || printf %s \"elasticsearch \" >> \"$failures_file\"")
fi

# Execute the collected steps: via moreutils parallel when enabled,
# otherwise serially in order.
if [ "$GHE_PARALLEL_ENABLED" = "yes" ]; then
  "$GHE_PARALLEL_COMMAND" "${GHE_PARALLEL_COMMAND_OPTIONS[@]}" -- "${commands[@]}"
else
  for c in "${commands[@]}"; do
    eval "$c"
  done
fi

# Merge failures recorded by the (possibly parallel) subshells.
if [ -s "$failures_file" ]; then
  failures="$failures $(cat "$failures_file")"
fi

# git fsck repositories after the backup
if [ "$GHE_BACKUP_FSCK" = "yes" ]; then
  log_info "Running git fsck on repositories ..."
  ghe-backup-fsck "$GHE_SNAPSHOT_DIR" || failures="$failures fsck"
fi

# If everything was successful, mark the snapshot as complete, update the
# current symlink to point to the new snapshot and prune expired and failed
# snapshots.
if [ -z "$failures" ]; then
  rm "incomplete"

  rm -f "../current"
  ln -s "$GHE_SNAPSHOT_TIMESTAMP" "../current"

  # Pruning can be deferred to a separate cron job via GHE_PRUNING_SCHEDULED.
  if [[ $GHE_PRUNING_SCHEDULED != "yes" ]]; then
    ghe-prune-snapshots
  else
    log_info "Expired and incomplete snapshots to be pruned separately"
  fi
else
  log_info "Skipping pruning snapshots, since some backups failed..."
fi

END_TIME=$(date +%s)
log_info "Runtime: $((END_TIME - START_TIME)) seconds"
log_info "Completed backup of $GHE_HOSTNAME in snapshot $GHE_SNAPSHOT_TIMESTAMP at $(date +"%H:%M:%S")"

# Exit non-zero and list the steps that failed.
if [ -z "$failures" ]; then
  ghe_remote_logger "Completed backup from $(hostname) / snapshot $GHE_SNAPSHOT_TIMESTAMP successfully."
else
  steps="${failures// /, }"
  ghe_remote_logger "Completed backup from $(hostname) / snapshot $GHE_SNAPSHOT_TIMESTAMP with failures: ${steps}."
  log_error "Error: Snapshot incomplete. Some steps failed: ${steps}. "
  ghe_backup_finished
  exit 1
fi

# Detect if the created backup contains any leaked ssh keys
log_info "Checking for leaked ssh keys ..."
ghe-detect-leaked-ssh-keys -s "$GHE_SNAPSHOT_DIR" || true

log_info "Backup of $GHE_HOSTNAME finished."

# Remove in-progress file
ghe_backup_finished

Просмотреть файл

@ -1,56 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-progress [--once]
#/ Tracks the completed steps of a backup or restore operation.
#/
#/ By default the progress is printed continuously until a key is pressed.
#/ Use the --once option to print the current progress once and exit.
#/
#/ Options:
#/   --once    Don't loop, just print the current progress once.
#
set -e

# Parse arguments. Unknown flags are fatal; the first non-flag ends parsing.
while true; do
  case "$1" in
    -o|--once)
      ONCE=1
      shift
      ;;
    -h|--help)
      # Print the usage block above and exit 2, per the repository style
      # guide. (Previously this only set GHE_SHOW_HELP, which nothing in
      # this script consumed, so --help silently fell through.)
      grep '^#/' <"$0" | cut -c 4-
      exit 2
      ;;
    -*)
      echo "Unknown option: $1" >&2
      exit 1
      ;;
    *)
      break
      ;;
  esac
done

# Abort with a hint when no backup/restore has written progress state yet.
check_for_progress_file() {
  if [ ! -f /tmp/backup-utils-progress/info ]; then
    echo "No progress file found. Has a backup or restore been started?"
    exit 1
  fi
}

check_for_progress_file
if [ -n "$ONCE" ]; then
  cat /tmp/backup-utils-progress/info
else
  clear
  cat /tmp/backup-utils-progress/info
  # Redraw once per second until any key is pressed.
  while true; do
    if read -r -t 1 -n 1; then
      clear
      exit 0
    else
      clear
      cat /tmp/backup-utils-progress/info
    fi
  done
fi

Просмотреть файл

@ -1,265 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-host-check [-h] [--version] [<host>]
#/
#/ Verify connectivity with the GitHub Enterprise Server host.
#/
#/ OPTIONS:
#/ -h | --help Show this message.
#/ --version Display version information.
#/ <host> The GitHub Enterprise Server host to check. When no
#/ <host> is provided, the $GHE_HOSTNAME configured in
#/ backup.config is assumed.
#/
set -e
while true; do
case "$1" in
-h | --help)
export GHE_SHOW_HELP=true
shift
;;
--version)
export GHE_SHOW_VERSION=true
shift
;;
-*)
echo "Error: invalid argument: '$1'" 1>&2
exit 1
;;
*)
break
;;
esac
done
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$(dirname "${BASH_SOURCE[0]}")/../share/github-backup-utils/ghe-backup-config"
# Use the host provided on the command line if provided, or fallback on the
# $GHE_HOSTNAME configured in backup.config when not present.
host="${1:-$GHE_HOSTNAME}"
# Options to pass to SSH during connection check
options="
-o PasswordAuthentication=no
-o ConnectTimeout=5
-o ConnectionAttempts=1
"
# Split host:port into parts
port=$(ssh_port_part "$host")
hostname=$(ssh_host_part "$host")
set +e
# ghe-negotiate-version verifies if the target is a GitHub Enterprise Server instance
output=$(echo "ghe-negotiate-version backup-utils $BACKUP_UTILS_VERSION" | ghe-ssh -o BatchMode=no $options $host -- /bin/sh 2>&1)
rc=$?
set -e
if [ $rc -ne 0 ]; then
case $rc in
255)
if echo "$output" | grep -i "port 22: Network is unreachable\|port 22: connection refused\|port 22: no route to host\|ssh_exchange_identification: Connection closed by remote host\|Connection timed out during banner exchange\|port 22: Connection timed out" >/dev/null; then
exec "$(basename $0)" "$hostname:122"
fi
echo "$output" 1>&2
echo "Error: ssh connection with '$host' failed" 1>&2
echo "Note that your SSH key needs to be setup on $host as described in:" 1>&2
echo "* https://docs.github.com/enterprise-server/admin/configuration/configuring-your-enterprise/accessing-the-administrative-shell-ssh" 1>&2
;;
101)
echo "Error: couldn't read GitHub Enterprise Server fingerprint on '$host' or this isn't a GitHub appliance." 1>&2
;;
1)
if [ "${port:-22}" -eq 22 ] && echo "$output" | grep "use port 122" >/dev/null; then
exec "$(basename $0)" "$hostname:122"
else
echo "$output" 1>&2
fi
;;
esac
exit $rc
fi
CLUSTER=false
if ghe-ssh "$host" -- \
"[ -f '$GHE_REMOTE_ROOT_DIR/etc/github/cluster' ]"; then
CLUSTER=true
fi
# ensure all nodes in the cluster are online/reachable and running the same version
if "$CLUSTER"; then
online_status=$(ghe-ssh "$host" ghe-cluster-host-check)
if [ "$online_status" != "Cluster is ready to configure." ]; then
echo "Error: Not all nodes are online! Please ensure cluster is in a healthy state before using backup-utils." 1>&2
exit 1
fi
node_version_list=$(ghe-ssh "$host" ghe-cluster-each -- ghe-version)
distinct_versions=$(echo "$node_version_list" | awk '{split($0, a, ":"); print a[2]}' | awk '{print $4}' | uniq | wc -l)
if [ "$distinct_versions" -ne 1 ]; then
echo "Version mismatch: $node_version_list" 1>&2
echo "Error: Not all nodes are running the same version! Please ensure all nodes are running the same version before using backup-utils." 1>&2
exit 1
fi
fi
version=$(echo "$output" | grep "GitHub Enterprise" | awk '{print $NF}')
if [ -z "$version" ]; then
echo "Error: failed to parse version on '$host' or this isn't a GitHub appliance." 1>&2
exit 2
fi
# Block restoring snapshots to older releases of GitHub Enterprise Server
if [ -n "$GHE_RESTORE_SNAPSHOT_PATH" ]; then
snapshot_version=$(cat $GHE_RESTORE_SNAPSHOT_PATH/version)
# shellcheck disable=SC2046 # Word splitting is required to populate the variables
read -r snapshot_version_major snapshot_version_minor _ <<<$(ghe_parse_version $snapshot_version)
if [ "$(version $GHE_REMOTE_VERSION)" -lt "$(version $snapshot_version_major.$snapshot_version_minor.0)" ]; then
echo "Error: Snapshot can not be restored to an older release of GitHub Enterprise Server." >&2
exit 1
fi
fi
if [ -z "$GHE_ALLOW_REPLICA_BACKUP" ]; then
if [ "$(ghe-ssh $host -- cat $GHE_REMOTE_ROOT_DIR/etc/github/repl-state 2>/dev/null || true)" = "replica" ]; then
echo "Error: high availability replica detected." 1>&2
echo "Backup Utilities should be used to backup from the primary node in" 1>&2
echo "high availability environments to ensure consistent and reliable backups." 1>&2
exit 1
fi
fi
# backup-utils 2.13 onwards limits support to the current and previous two releases
# of GitHub Enterprise Server.
supported_minimum_version="3.9.0"
if [ "$(version $version)" -ge "$(version $supported_minimum_version)" ]; then
supported=1
fi
if [ -z "$supported" ]; then
echo "Error: unsupported release of GitHub Enterprise Server detected." 1>&2
echo "Backup Utilities v$BACKUP_UTILS_VERSION requires GitHub Enterprise Server v$supported_minimum_version or newer." 1>&2
echo "Please update your GitHub Enterprise Server appliance or use an older version of Backup Utilities." 1>&2
exit 1
fi
# Storage and tool-version preflight checks, run only for ghe-backup and only
# when the user has not opted out via --skip-checks / GHE_SKIP_CHECKS=true.
if [[ "$CALLING_SCRIPT" == "ghe-backup" && "$GHE_SKIP_CHECKS" != "true" ]]; then
  # Tell the user how to opt out of these (potentially slow) checks.
  cat << SKIP_MSG
**You can disable the following storage & version checks by running ghe-backup with option "--skip-checks"
OR updating GHE_SKIP_CHECKS to 'true' in your backup.config file.
SKIP_MSG

  # Bring in the requirements file
  min_rsync=""
  min_openssh=""
  min_jq=""
  # shellcheck source=share/github-backup-utils/requirements.txt
  . "$(dirname "${BASH_SOURCE[0]}")/../share/github-backup-utils/requirements.txt"

  #source disk size file
  # shellcheck source=share/github-backup-utils/ghe-rsync-size
  . "$(dirname "${BASH_SOURCE[0]}")/../share/github-backup-utils/ghe-rsync-size"

  #Check if GHE_DATA_DIR is NFS mounted
  fs_info=$(stat -f -c "%T" "$GHE_DATA_DIR") || true
  if [ "$fs_info" == "nfs" ]; then
    echo "Warning: NFS (Network File System) detected for $GHE_DATA_DIR" 1>&2
    echo "Please review https://gh.io/backup-utils-storage-requirements for details." 1>&2
  fi

  #Display dir requirements for repositories and mysql
  echo -e "\nChecking host for sufficient space for a backup..."
  # df -B 1k reports 1K blocks; the $4 * 1024 converts the "available" column to bytes.
  available_space=$(df -B 1k $GHE_DATA_DIR | awk 'END{printf "%.0f", $4 * 1024}')

  echo " We recommend allocating at least 5x the amount of storage allocated to the primary GitHub appliance for historical snapshots and growth over time."

  # Estimated per-datastore transfer sizes in MB (computed by ghe-rsync-size).
  repos_disk_size=$(transfer_size repositories /tmp)
  pages_disk_size=$(transfer_size pages /tmp)
  es_disk_size=$(transfer_size elasticsearch /tmp)
  stor_disk_size=$(transfer_size storage /tmp)
  minio_disk_size=$(transfer_size minio /tmp)
  mysql_disk_size=$(transfer_size mysql /tmp)
  actions_disk_size=$(transfer_size actions /tmp)
  mssql_disk_size=$(transfer_size mssql /tmp)

  min_disk_req=$((repos_disk_size + pages_disk_size + es_disk_size + stor_disk_size + minio_disk_size + mysql_disk_size + actions_disk_size + mssql_disk_size))
  recommended_disk_req=$((min_disk_req * 5))
  echo " - Available space: $((available_space / (1024 ** 2))) MB"
  echo " - Min Disk required for this backup is at least $min_disk_req MB"
  echo -e " - Recommended Disk requirement is $recommended_disk_req MB\n"

  printf '### Estimated Data Transfer Sizes
- repositories: %d MB
- pages: %d MB
- elasticsearch: %d MB
- storage: %d MB
- minio: %d MB
- mysql: %d MB
- actions: %d MB
- mssql: %d MB
\n' \
  "$repos_disk_size" "$pages_disk_size" "$es_disk_size" "$stor_disk_size" "$minio_disk_size" "$mysql_disk_size" "$actions_disk_size" "$mssql_disk_size"

  # Compare available space (bytes -> MB) against the estimated minimum (MB).
  if [[ $((available_space / (1024 * 1024))) -lt $min_disk_req ]]; then
    echo "There is not enough disk space for the backup. Please allocate more space and continue." 1>&2
    exit 1
  fi

  #Check rsync, openssh & jq versions
  commands=("jq" "rsync" "ssh")
  missing_command=""
  for cmd in "${commands[@]}"; do
    if ! command -v "$cmd" > /dev/null 2>&1; then
      missing_command+="$cmd "
    fi
  done
  # Check if any command is missing
  if [[ -n "$missing_command" ]]; then
    echo "One or more required tools not found: $missing_command" 1>&2
    echo "Please make sure the following utils are installed and available in your PATH: $missing_command" 1>&2
    exit 1
  fi

  echo "### Software versions"
  rsync_version=$(rsync --version | grep 'version' | awk '{print $3}' | tr -cd '[:digit:].\n')
  # NOTE(review): awk parses "x.y.z" as the float x.y, so this minimum-version
  # check compares only the first two components — confirm min_rsync never needs
  # patch-level precision.
  if awk "BEGIN {exit !($rsync_version < $min_rsync)}" &> /dev/null; then
    echo "rsync version $rsync_version in backup-host does not meet minimum requirements." 1>&2
    echo "Please make sure you have the minimum required version of rsync: $min_rsync installed" 1>&2
    exit 1
  # Fix: the original `[[ $rsync_version < 3.2.5 ]]` compared version strings
  # lexicographically, so e.g. "3.10.0" would be considered older than "3.2.5".
  # Use the `version` helper (already used above) for a proper numeric compare.
  elif [ "$(version "$rsync_version")" -lt "$(version 3.2.5)" ] && [[ $RSYNC_WARNING != "no" ]]; then
    printf "\n **WARNING:** rsync version %s on backup host is less than 3.2.5, which could result in performance degradation.
For more details, please read documentation at https://gh.io/april-2023-update-of-rsync-requirements
You can disable this warning by changing RSYNC_WARNING to 'no' in your backup.config file.\n\n" \
    "$rsync_version"
  fi
  echo " - rsync ${rsync_version} >= required ($min_rsync)"

  ssh_version=$(ssh -V 2>&1 | awk '{print $1}'|grep -oPm 1 '[\d\.]+' |head -1 | tr -cd '[:digit:].\n')
  # NOTE(review): same two-component float comparison caveat as rsync above.
  if awk "BEGIN {exit !($ssh_version < $min_openssh)}" &> /dev/null; then
    echo "openSSH version $ssh_version in backup-host does not meet minimum requirements." 1>&2
    echo "Please make sure the minimum required version of openSSH: $min_openssh is installed" 1>&2
    exit 1
  else
    echo " - openSSH ${ssh_version} >= required ($min_openssh)"
  fi

  jq_version=$(jq --version |awk -F\- '{print $2}' | tr -cd '[:digit:].\n')
  if awk "BEGIN {exit !($jq_version < $min_jq)}" &> /dev/null; then
    echo "jq version $jq_version in backup-host does not meet minimum requirements." 1>&2
    echo "Please make sure you have the minimum required version of jq: $min_jq installed" 1>&2
    exit 1
  else
    echo " - jq ${jq_version} >= required ($min_jq)"
  fi
fi

# NOTE(review): $port and $version are set earlier in this script (outside this
# fragment).
echo -e "\nConnect $hostname:$port OK (v$version)"

Просмотреть файл

@ -1,727 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore [-cfhv] [--version] [--skip-mysql] [-s <snapshot-id>] [<host>]
#/
#/ Restores a GitHub instance from local backup snapshots.
#/
#/ Note that the GitHub Enterprise host must be reachable and your SSH key must
#/ be setup as described in the following help article:
#/
#/ <https://docs.github.com/enterprise-server/admin/configuration/configuring-your-enterprise/accessing-the-administrative-shell-ssh >
#/
#/ OPTIONS:
#/   -c | --config      Restore appliance settings and license in addition to
#/                      datastores. Settings are not restored by default to
#/                      prevent overwriting different configuration on the
#/                      restore host.
#/   -f | --force       Don't prompt for confirmation before restoring.
#/   -h | --help        Show this message.
#/   -v | --verbose     Enable verbose output.
#/   --skip-mysql       Skip MySQL restore steps. Only applicable to external databases.
#/   --version          Display version information and exit.
#/
#/   -s <snapshot-id>   Restore from the snapshot with the given id. Available
#/                      snapshots may be listed under the data directory.
#/
#/   <host>             The <host> is the hostname or IP of the GitHub Enterprise
#/                      instance. The <host> may be omitted when the
#/                      GHE_RESTORE_HOST config variable is set in backup.config.
#/                      When a <host> argument is provided, it always overrides
#/                      the configured restore host.
#/
set -e

# Parse arguments
# The `: "${VAR:=default}"` idiom keeps any value already set in the
# environment, falling back to the default otherwise.
: "${RESTORE_SETTINGS:=false}"
export RESTORE_SETTINGS
: "${FORCE:=false}"
export FORCE
: "${SKIP_MYSQL:=false}"
export SKIP_MYSQL

while true; do
  case "$1" in
    --skip-mysql)
      SKIP_MYSQL=true
      shift
      ;;
    -f|--force)
      FORCE=true
      shift
      ;;
    -s)
      # basename strips any path prefix so a full snapshot path is accepted too.
      snapshot_id="$(basename "$2")"
      shift 2
      ;;
    -c|--config)
      RESTORE_SETTINGS=true
      shift
      ;;
    -h|--help)
      export GHE_SHOW_HELP=true
      shift
      ;;
    --version)
      export GHE_SHOW_VERSION=true
      shift
      ;;
    -v|--verbose)
      export GHE_VERBOSE=true
      shift
      ;;
    # NOTE(review): -i/--incremental is accepted here but is not listed in the
    # usage text above — confirm whether it is intentionally undocumented.
    -i|--incremental)
      export GHE_INCREMENTAL=true
      shift
      ;;
    -*)
      echo "Error: invalid argument: '$1'" 1>&2
      exit 1
      ;;
    *)
      # First non-option argument is the restore host; loop ends when args run out.
      if [ -n "$1" ]; then
        GHE_RESTORE_HOST_OPT="$1"
        shift
      else
        break
      fi
      ;;
  esac
done
# Start the cron service on the appliance — on every node when restoring into a
# cluster, on the single host otherwise. Failure to start is logged as a
# warning but does not abort the caller.
start_cron () {
  log_info "Starting cron ..."
  local remote_cmd="sudo timeout 120s service cron start"
  if $CLUSTER; then
    if ! ghe-ssh "$GHE_HOSTNAME" -- "ghe-cluster-each -- $remote_cmd"; then
      log_warn "Failed to start cron on one or more nodes"
    fi
  else
    if ! ghe-ssh "$GHE_HOSTNAME" -- "$remote_cmd"; then
      log_warn "Failed to start cron"
    fi
  fi
}
# EXIT-trap handler. $1 is an optional restore status ("failed") to record on
# the remote appliance before cleaning up. Restarts Actions and cron if this
# script stopped them, tears down SSH multiplexing, and removes the local
# in-progress and progress files.
cleanup () {
  log_info " Exiting, cleaning up ..."
  if [ -n "$1" ]; then
    update_restore_status "$1"
  fi

  if $ACTIONS_STOPPED && ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --true app.actions.enabled'; then
    log_info "Restarting Actions after restore ..."
    # In GHES 3.3+, ghe-actions-start no longer has a -f (force) flag. In GHES 3.2 and below, we must provide the
    # force flag to make sure it can start in maintenance mode. Use it conditionally based on whether it exists
    # in the --help output
    if ghe-ssh "$GHE_HOSTNAME" -- 'ghe-actions-start --help' | grep -q force; then
      ghe-ssh "$GHE_HOSTNAME" -- 'ghe-actions-start -f' 1>&3
    else
      ghe-ssh "$GHE_HOSTNAME" -- 'ghe-actions-start' 1>&3
    fi
  fi

  if ! $CRON_RUNNING; then
    start_cron
  fi

  # Cleanup SSH multiplexing
  log_info "Cleaning up SSH multiplexing ..."
  if ! ghe-ssh --clean; then
    log_info "Failed to clean up SSH multiplexing"
  fi

  # Remove in-progress file
  log_info "Removing in-progress file ..." 1>&3
  if ! rm -f "${GHE_DATA_DIR}/in-progress-restore"; then
    log_error "Failed to remove in-progress file" 1>&3
  fi

  # Remove progress files
  rm -rf /tmp/backup-utils-progress/*

  bm_end "$(basename $0)"
}
# This function is never invoked locally: its definition is serialized with
# `typeset -f` and shipped to the remote host over `ghe-ssh`. Because it runs
# remotely it must not redirect to fd 3 or use log_info/log_warn/log_error.
# It evacuates and destroys every service registration for a stale node UUID.
# shellcheck disable=SC2034
cleanup_cluster_nodes() {
  local node_uuid="$1"
  if [ -z "$node_uuid" ]; then
    log_error "Node UUID required."
    exit 2
  fi

  echo "Cleaning up spokes"
  ghe-spokes server evacuate "git-server-$node_uuid" 'Removing replica'
  ghe-spokes server destroy "git-server-$node_uuid"

  echo "Cleaning up storage"
  ghe-storage destroy-host "storage-server-$node_uuid" --force

  echo "Cleaning up dpages"
  ghe-dpages offline "pages-server-$node_uuid"
  ghe-dpages remove "pages-server-$node_uuid"

  echo "Cleaning up redis"
  ghe-redis-cli del "resque:queue:maint_git-server-$node_uuid"
  ghe-redis-cli srem resque:queues "maint_git-server-$node_uuid"
}
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/../share/github-backup-utils/ghe-backup-config"

# Check to make sure moreutils parallel is installed and working properly
ghe_parallel_check

# Check to make sure another restore process is not running
ghe_restore_check

# Grab the host arg
# CLI argument takes precedence over the GHE_RESTORE_HOST config value.
GHE_HOSTNAME="${GHE_RESTORE_HOST_OPT:-$GHE_RESTORE_HOST}"

# Hostname without any port suffix
hostname=$(echo "$GHE_HOSTNAME" | cut -f 1 -d :)

# Show usage with no <host>
[ -z "$GHE_HOSTNAME" ] && print_usage

# Flag to indicate if this script has stopped Actions.
ACTIONS_STOPPED=false

# ghe-restore-snapshot-path validates it exists, determines what current is,
# and if there's any problem, exit for us
GHE_RESTORE_SNAPSHOT_PATH="$(ghe-restore-snapshot-path "$snapshot_id")"
GHE_RESTORE_SNAPSHOT=$(basename "$GHE_RESTORE_SNAPSHOT_PATH")
export GHE_RESTORE_SNAPSHOT

# Check to make sure backup is not running
ghe_backup_check

# Detect if the backup we are restoring has a leaked ssh key
# Best-effort: `|| true` keeps a detection failure from aborting under set -e.
echo "Checking for leaked keys in the backup snapshot that is being restored ..."
ghe-detect-leaked-ssh-keys -s "$GHE_RESTORE_SNAPSHOT_PATH" || true

# Figure out whether to use the tarball or rsync restore strategy based on the
# strategy file written in the snapshot directory.
GHE_BACKUP_STRATEGY=$(cat "$GHE_RESTORE_SNAPSHOT_PATH/strategy")

# Perform a host-check and establish the remote version in GHE_REMOTE_VERSION.
ghe_remote_version_required "$GHE_HOSTNAME"

# Figure out if this instance has been configured or is entirely new.
# An unconfigured instance always gets settings restored.
instance_configured=false
if is_instance_configured; then
  instance_configured=true
else
  RESTORE_SETTINGS=true
fi

# Figure out if we're restoring into cluster
CLUSTER=false
if ghe-ssh "$GHE_HOSTNAME" -- \
  "[ -f '$GHE_REMOTE_ROOT_DIR/etc/github/cluster' ]"; then
  CLUSTER=true
fi
export CLUSTER

# Restoring a cluster backup to a standalone appliance is not supported
if ! $CLUSTER && [ "$GHE_BACKUP_STRATEGY" = "cluster" ]; then
  log_error "Error: Snapshot from a GitHub Enterprise cluster cannot be restored to a standalone appliance. Aborting." >&2
  exit 1
fi

# Ensure target appliance and restore snapshot are a compatible combination with respect to BYODB
if ! ghe-restore-external-database-compatibility-check; then
  exit 1
fi

# Figure out if this appliance is in a replication pair
if ghe-ssh "$GHE_HOSTNAME" -- \
  "[ -f '$GHE_REMOTE_ROOT_DIR/etc/github/repl-state' ]"; then
  log_error "Error: Restoring to an appliance with replication enabled is not supported. Please teardown replication before restoring." >&2
  exit 1
fi

# Prompt to verify the restore host given is correct. Restoring overwrites
# important data on the destination appliance that cannot be recovered. This is
# mostly to prevent accidents where the backup host is given to restore instead
# of a separate restore host since they're used in such close proximity.
if $instance_configured && ! $FORCE; then
  echo
  echo "WARNING: All data on GitHub Enterprise appliance $hostname ($GHE_REMOTE_VERSION)"
  echo " will be overwritten with data from snapshot ${GHE_RESTORE_SNAPSHOT}."
  echo
  if is_external_database_snapshot && $RESTORE_SETTINGS; then
    echo "WARNING: This operation will also restore the external MySQL connection configuration,"
    echo " which may be dangerous if the GHES appliance the snapshot was taken from is still online."
    echo
  fi
  prompt_for_confirmation "Please verify that this is the correct restore host before continuing."
fi

# Prompt to verify that restoring BYODB snapshot to unconfigured instance
# will result in BYODB connection information being restored as well.
if is_external_database_snapshot && ! $instance_configured && ! $FORCE; then
  echo
  echo "WARNING: This operation will also restore the external MySQL connection configuration,"
  echo " which may be dangerous if the GHES appliance the snapshot was taken from is still online."
  echo
  prompt_for_confirmation "Please confirm this before continuing."
fi
# Calculate the actual amounts of steps in the restore process
# taking into account the options passed to the script and the appliance configuration
# calculate restore steps
OPTIONAL_STEPS=0

# Restoring UUID
if [ -s "$GHE_RESTORE_SNAPSHOT_PATH/uuid" ] && ! $CLUSTER; then
  OPTIONAL_STEPS=$((OPTIONAL_STEPS + 1))
fi
# Restoring Actions + MSSQL
if ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --true app.actions.enabled'; then
  OPTIONAL_STEPS=$((OPTIONAL_STEPS + 2))
fi
# Restoring minio
if ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --true app.minio.enabled'; then
  OPTIONAL_STEPS=$((OPTIONAL_STEPS + 1))
fi
# Restoring Elasticsearch
if ! $CLUSTER && [ -d "$GHE_RESTORE_SNAPSHOT_PATH/elasticsearch" ]; then
  OPTIONAL_STEPS=$((OPTIONAL_STEPS + 1))
fi
# Restoring audit log
# Fix: use GHE_RESTORE_SKIP_AUDIT_LOGS (plural) — the variable actually
# consulted later in this script when deciding whether to restore audit logs.
# The singular spelling here meant the progress total still counted the audit
# log step even when it was skipped.
if $CLUSTER || [ "$(version "$GHE_REMOTE_VERSION")" -ge "$(version 2.12.9)" ]; then
  if [[ "$GHE_RESTORE_SKIP_AUDIT_LOGS" != "yes" ]]; then
    OPTIONAL_STEPS=$((OPTIONAL_STEPS + 1))
  fi
fi
# Replica cleanup
if ! $CLUSTER && $instance_configured; then
  OPTIONAL_STEPS=$((OPTIONAL_STEPS + 1))
fi
# Restoring settings + restore-chat-integration + restore-packages
if $RESTORE_SETTINGS; then
  OPTIONAL_STEPS=$((OPTIONAL_STEPS + 3))
fi

# Minimum number of steps is 7
export PROGRESS_TOTAL=$((OPTIONAL_STEPS + 7))
init-progress
echo "$PROGRESS_TOTAL" > /tmp/backup-utils-progress/total
export PROGRESS_TYPE="Restore"
echo "$PROGRESS_TYPE" > /tmp/backup-utils-progress/type
export PROGRESS=0 # Used to track progress of restore
echo "$PROGRESS" > /tmp/backup-utils-progress/progress

# Log restore start message locally and in /var/log/syslog on remote instance
bm_start "$(basename $0)"
START_TIME=$(date +%s)
log_info "Starting restore of $GHE_HOSTNAME with backup-utils v$BACKUP_UTILS_VERSION from snapshot $GHE_RESTORE_SNAPSHOT"
# Incremental restore is only supported on GHES 3.10+.
if [ "$GHE_INCREMENTAL" ]; then
  # Fix: only inspect the minor version when the major version is exactly 3.
  # The original checked `GHE_VERSION_MINOR -lt 10` unconditionally, which
  # would wrongly reject newer major releases such as 4.0.
  if [ "$GHE_VERSION_MAJOR" -lt 3 ] || { [ "$GHE_VERSION_MAJOR" -eq 3 ] && [ "$GHE_VERSION_MINOR" -lt 10 ]; }; then
    log_error "Can only perform incremental restores on enterprise version 3.10 or higher"
    exit 1
  fi
  log_info "Incremental restore from snapshot $GHE_RESTORE_SNAPSHOT"
  # If we see 'inc_previous' prepended to the snapshot name, then
  # we set $INC_FULL_BACKUP and $INC_SNAPSHOT_DATA to $INC_PREVIOUS_FULL_BACKUP and
  # $INC_PREVIOUS_SNAPSHOT_DATA respectively. Otherwise, leave them at default setting
  # so that incremental restore is from current cycle
  if [[ "$GHE_RESTORE_SNAPSHOT" =~ ^inc_previous ]]; then
    INC_FULL_BACKUP=$INC_PREVIOUS_FULL_BACKUP
    INC_SNAPSHOT_DATA=$INC_PREVIOUS_SNAPSHOT_DATA
    log_info "Incremental restore from previous cycle snapshot. Using $INC_FULL_BACKUP"
    log_info "Incremental restore from previous cycle snapshot. Using $INC_SNAPSHOT_DATA"
  fi
  log_info "Validating snapshot $GHE_RESTORE_SNAPSHOT"
  validate_inc_snapshot_data "$GHE_RESTORE_SNAPSHOT"
fi

ghe_remote_logger "Starting restore from $(hostname) with backup-utils v$BACKUP_UTILS_VERSION / snapshot $GHE_RESTORE_SNAPSHOT ..."

# Create an in-progress-restore file to prevent simultaneous backup or restore runs
echo "${START_TIME} $$" > "${GHE_DATA_DIR}/in-progress-restore"
#
# Other processes will look for these states:
# "restoring" - restore is currently in progress
# "failed" - restore has failed
# "complete" - restore has completed successfully
update_restore_status () {
if $CLUSTER; then
echo "ghe-cluster-each -- \"echo '$1' | sudo sponge '$GHE_REMOTE_DATA_USER_DIR/common/ghe-restore-status' >/dev/null\"" |
ghe-ssh "$GHE_HOSTNAME" /bin/bash
else
echo "$1" |
ghe-ssh "$GHE_HOSTNAME" -- "sudo sponge '$GHE_REMOTE_DATA_USER_DIR/common/ghe-restore-status' >/dev/null"
fi
}
# Tracks whether cron is running on the appliance so cleanup() knows whether to
# restart it.
CRON_RUNNING=true

# Update remote restore state file and setup failure trap
trap "cleanup failed" EXIT
update_restore_status "restoring"

# Make sure the GitHub appliance is in maintenance mode.
if $instance_configured; then
  if ! ghe-maintenance-mode-status "$GHE_HOSTNAME"; then
    log_error "Error: $GHE_HOSTNAME must be put in maintenance mode before restoring. Aborting." 1>&2
    exit 1
  fi
fi

# Get GHES release version in major.minor format
RELEASE_VERSION=$(ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --get core.package-version' | cut -d '.' -f 1,2)

# If the backup being restored is from an appliance with Actions disabled, restoring it onto an appliance with Actions enabled will cause
# mismatches in the secrets needed for Actions which ultimately results in Actions not working properly. Note: xargs is to remove whitespace
ACTIONS_ENABLED_IN_BACKUP=$(git config -f "$GHE_RESTORE_SNAPSHOT_PATH/settings.json" --bool app.actions.enabled | xargs)
if [[ $ACTIONS_ENABLED_IN_BACKUP != true ]] && ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --true app.actions.enabled'; then
  log_error "Restoring a backup with Actions disabled onto an appliance with Actions enabled is not supported." >&2
  exit 1
fi

# Make sure the GitHub appliance has Actions enabled if the snapshot contains Actions data.
# If above is true, also check if ac is present in appliance then snapshot should also contains ac databases
if [ -d "$GHE_RESTORE_SNAPSHOT_PATH/mssql" ] || [ -d "$GHE_RESTORE_SNAPSHOT_PATH/actions" ]; then
  if ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --true app.actions.enabled'; then
    # Count ArtifactCache databases on the appliance vs. .bak files in the snapshot.
    ac_db_ghe=$(echo 'ghe-mssql-console -y -n -q "SELECT name FROM sys.databases" | grep -i "ArtifactCache" | wc -l | tr -d " "' | ghe-ssh "$GHE_HOSTNAME" /bin/bash)
    ac_db_snapshot=$(find "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/mssql/" -maxdepth 1 -name 'ArtifactCache*.bak' | wc -l | tr -d " ")
    if [[ $ac_db_ghe -gt 0 && $ac_db_snapshot -eq 0 ]]; then
      log_error "$GHE_HOSTNAME has Actions Cache service enabled but no Actions Cache data is present in snapshot to restore. Aborting \n Please disable Actions cache service in $GHE_HOSTNAME and retry\nTo disable Actions Cache service run as admin: ghe-actions-cache-disable" 1>&2
      exit 1
    fi
    # NOTE(review): inside [[ ]], `! $RESTORE_SETTINGS` is a string-emptiness
    # test on the expansion ("true"/"false" are both non-empty), so this last
    # clause can never be true — likely intended `$RESTORE_SETTINGS != true`.
    # Confirm before changing, as fixing it enables a new abort path.
    if [[ $ac_db_ghe -eq 0 && $ac_db_snapshot -gt 0 && ! $RESTORE_SETTINGS ]]; then
      log_error "$GHE_HOSTNAME has Actions Cache service disabled but the snapshot is attempting to restore data for the service. Aborting. \n Please enable Actions cache service in $GHE_HOSTNAME and retry \n To enable Actions Cache service run as admin: ghe-actions-cache-enable" 1>&2
      exit 1
    fi
  else
    log_error "$GHE_HOSTNAME must have GitHub Actions enabled before restoring since the snapshot contains Actions data. Aborting. \n Setup details for enabling Actions can be found here: https://docs.github.com/en/enterprise-server@$RELEASE_VERSION/admin/github-actions/advanced-configuration-and-troubleshooting/backing-up-and-restoring-github-enterprise-server-with-github-actions-enabled" 1>&2
    exit 1
  fi
fi

# Create benchmark file
bm_init > /dev/null

ghe-backup-store-version ||
  log_warn "Warning: storing backup-utils version remotely failed."

# Stop cron and timerd, as scheduled jobs may disrupt the restore process.
log_info "Stopping cron and github-timerd ..."
if $CLUSTER; then
  bm_start "$(basename $0) - Stopping cron and github-timerd on cluster"
  if ! ghe-ssh "$GHE_HOSTNAME" -- "ghe-cluster-each -- sudo service cron stop"; then
    log_warn "Failed to stop cron on one or more nodes" 1>&3
  fi
  bm_end "$(basename $0) - Stopping cron and github-timerd on cluster"

  if [ "$GHE_VERSION_MAJOR" -eq "3" ]; then
    # On GHES 3.x github-timerd runs under nomad; only stop it if the job exists.
    if ghe-ssh "$GHE_HOSTNAME" -- "systemctl -q is-active nomad && nomad job status --short github-timerd &>/dev/null"; then
      if ! ghe-ssh "$GHE_HOSTNAME" -- "sudo nomad stop github-timerd 1>/dev/null"; then
        log_warn "Failed to stop github-timerd on one or more nodes" 1>&3
      fi
    fi
  else
    if ! ghe-ssh "$GHE_HOSTNAME" -- "ghe-cluster-each -- sudo service github-timerd stop"; then
      log_warn "Failed to stop github-timerd on one or more nodes" 1>&3
    fi
  fi
else
  bm_start "$(basename $0) - Stopping cron and github-timerd"
  echo "$(basename $0) - Stopping cron and github-timerd"
  if ! ghe-ssh "$GHE_HOSTNAME" -- "sudo service cron stop"; then
    log_warn "Failed to stop cron" 1>&3
  fi
  bm_end "$(basename $0) - Stopping cron and github-timerd"

  if [ "$GHE_VERSION_MAJOR" -eq "3" ]; then
    if ghe-ssh "$GHE_HOSTNAME" -- "systemctl -q is-active nomad && nomad job status --short github-timerd &>/dev/null"; then
      if ! ghe-ssh "$GHE_HOSTNAME" -- "sudo nomad stop github-timerd 1>/dev/null"; then
        log_warn "Failed to stop github-timerd" 1>&3
      fi
    fi
  else
    if ! ghe-ssh "$GHE_HOSTNAME" -- "sudo service github-timerd stop"; then
      log_warn "Failed to stop github-timerd" 1>&3
    fi
  fi
fi
CRON_RUNNING=false
# Restore secrets first; several later steps depend on them being in place.
ghe-restore-secrets "$GHE_HOSTNAME"

# Restore settings and license if restoring to an unconfigured appliance or when
# specified manually.
if $RESTORE_SETTINGS; then
  ghe-restore-settings "$GHE_HOSTNAME"
fi

# Make sure mysql and elasticsearch are prep'd and running before restoring.
# These services will not have been started on appliances that have not been
# configured yet.
if ! $CLUSTER; then
  echo "sudo ghe-service-ensure-mysql && sudo ghe-service-ensure-elasticsearch" |
    ghe-ssh "$GHE_HOSTNAME" -- /bin/sh 1>&3
fi

# Restore UUID if present and not restoring to cluster.
# Consul is stopped and its raft state wiped so the restored UUID takes effect.
if [ -s "$GHE_RESTORE_SNAPSHOT_PATH/uuid" ] && ! $CLUSTER; then
  log_info "Restoring UUID ..."
  bm_start "$(basename $0) - Restore UUID"
  ghe-ssh "$GHE_HOSTNAME" -- "sudo sponge '$GHE_REMOTE_DATA_USER_DIR/common/uuid' 2>/dev/null" <"$GHE_RESTORE_SNAPSHOT_PATH/uuid"
  ghe-ssh "$GHE_HOSTNAME" -- "sudo systemctl stop consul" || true
  ghe-ssh "$GHE_HOSTNAME" -- "sudo rm -rf /data/user/consul/raft"
  bm_end "$(basename $0) - Restore UUID"
fi

# Decide which MySQL restore strategy applies: external (BYODB), binary, or logical.
if is_external_database_snapshot; then
  appliance_strategy="external"
  backup_snapshot_strategy="external"
else
  if is_binary_backup_feature_on; then
    appliance_strategy="binary"
  else
    appliance_strategy="logical"
  fi
  if is_binary_backup "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT"; then
    backup_snapshot_strategy="binary"
  else
    backup_snapshot_strategy="logical"
  fi
fi

if is_external_database_target_or_snapshot && $SKIP_MYSQL; then
  log_info "Skipping MySQL restore."
else
  log_info "Restoring MySQL database from ${backup_snapshot_strategy} backup snapshot on an appliance configured for ${appliance_strategy} backups ..."
  increment-progress-total-count 2
  ghe-restore-mysql "$GHE_HOSTNAME" 1>&3
fi

if ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --true app.actions.enabled'; then
  log_info "Stopping Actions before restoring databases ..."
  # We mark Actions as stopped even if the `ghe-actions-stop`
  # fails to ensure that we cleanly start actions when performing cleanup.
  ACTIONS_STOPPED=true
  ghe-ssh "$GHE_HOSTNAME" -- 'ghe-actions-stop' 1>&3

  log_info "Restoring MSSQL databases ..."
  ghe-restore-mssql "$GHE_HOSTNAME" 1>&3

  log_info "Restoring Actions data ..."
  ghe-restore-actions "$GHE_HOSTNAME" 1>&3
  echo "* WARNING: Every self-hosted Actions runner that communicates with the restored GHES server must be restarted or reconfigured in order to continue functioning."
  echo "See https://docs.github.com/en/actions/hosting-your-own-runners/adding-self-hosted-runners for more details on how to reconfigure self-hosted Actions runners."
fi

if ghe-ssh "$GHE_HOSTNAME" -- 'ghe-config --true app.minio.enabled'; then
  log_info "Restoring MinIO data ..."
  ghe-restore-minio "$GHE_HOSTNAME" 1>&3
fi

# Build up an array of shell-command strings; each one echoes its log line and
# then runs a restore step. The array runs either in parallel or serially below.
# log input into a variable for the parallel command, as the functions don't work with eval
cmd_title=$(log_info "Restoring Redis database ...")
commands=("
echo \"$cmd_title\"
ghe-restore-redis \"$GHE_HOSTNAME\" \"$GHE_RESTORE_SNAPSHOT_PATH\"")

cmd_title=$(log_info "Restoring Git Repositories ...")
commands+=("
echo \"$cmd_title\"
ghe-restore-repositories \"$GHE_HOSTNAME\"")

cmd_title=$(log_info "Restoring Gists ...")
commands+=("
echo \"$cmd_title\"
ghe-restore-repositories-gist \"$GHE_HOSTNAME\"")

if [ "$GHE_BACKUP_PAGES" != "no" ]; then
  cmd_title=$(log_info "Restoring Pages ...")
  commands+=("
echo \"$cmd_title\"
ghe-restore-pages \"$GHE_HOSTNAME\" 1>&3")
fi

cmd_title=$(log_info "Restoring SSH authorized keys ...")
commands+=("
echo \"$cmd_title\"
ghe-restore-ssh-keys \"$GHE_HOSTNAME\" \"$GHE_RESTORE_SNAPSHOT_PATH\"")

cmd_title=$(log_info "Restoring storage data ...")
commands+=("
echo \"$cmd_title\"
ghe-restore-storage \"$GHE_HOSTNAME\" 1>&3")

cmd_title=$(log_info "Restoring custom Git hooks ...")
commands+=("
echo \"$cmd_title\"
ghe-restore-git-hooks \"$GHE_HOSTNAME\" 1>&3")

if ! $CLUSTER && [ -d "$GHE_RESTORE_SNAPSHOT_PATH/elasticsearch" ]; then
  cmd_title=$(log_info "Restoring Elasticsearch indices ...")
  commands+=("
echo \"$cmd_title\"
ghe-restore-es-rsync \"$GHE_HOSTNAME\" 1>&3")
fi

# Restore the audit log migration sentinel file, if it exists in the snapshot
if test -f "$GHE_RESTORE_SNAPSHOT_PATH/es-scan-complete"; then
  log_info "Restoring Elasticsearch audit log migration sentinel file ..." 1>&3
  if ! ghe-ssh "$GHE_HOSTNAME" -- "sudo touch $GHE_REMOTE_DATA_USER_DIR/common/es-scan-complete"; then
    log_info "Failed to restore Elasticsearch audit log migration sentinel file." 1>&3
  fi
fi

# Restore exported audit logs to 2.12.9 and newer single nodes and
# all releases of cluster
if $CLUSTER || [ "$(version "$GHE_REMOTE_VERSION")" -ge "$(version 2.12.9)" ]; then
  if [[ "$GHE_RESTORE_SKIP_AUDIT_LOGS" = "yes" ]]; then
    log_info "Skipping restore of audit logs."
  else
    cmd_title=$(log_info "Restoring Audit logs ...")
    commands+=("
echo \"$cmd_title\"
ghe-restore-es-audit-log \"$GHE_HOSTNAME\" 1>&3")
  fi
fi

# Run the collected restore steps, in parallel if configured.
if [ "$GHE_PARALLEL_ENABLED" = "yes" ]; then
  log_info "Restoring data in parallel ..."
  "$GHE_PARALLEL_COMMAND" "${GHE_PARALLEL_COMMAND_OPTIONS[@]}" -- "${commands[@]}"
else
  log_info "Restoring data serially ..." 1>&3
  for c in "${commands[@]}"; do
    # Re-source bm.sh each iteration so benchmark helpers are available to the
    # eval'd command string.
    . "$( dirname "${BASH_SOURCE[0]}" )/../share/github-backup-utils/bm.sh"
    eval "$c"
  done
fi
# Restart an already running memcached to reset the cache after restore
log_info "Restarting memcached ..." 1>&3
bm_start "$(basename $0) - Restarting memcached"
echo "sudo restart -q memcached 2>/dev/null || true" |
  ghe-ssh "$GHE_HOSTNAME" -- /bin/sh
bm_end "$(basename $0) - Restarting memcached"

# Prevent GitHub Connect jobs running before we've had a chance to reset
# the configuration by setting the last run date to now.
if ! $RESTORE_SETTINGS; then
  log_info "Setting last run date for GitHub Connect jobs ..." 1>&3
  echo "now=$(date +%s.0000000); ghe-redis-cli mset timer:UpdateConnectInstallationInfo \$now timer:UploadEnterpriseServerUserAccountsJob \$now timer:UploadConnectMetricsJob \$now timer:GitHubConnectPushNewContributionsJob \$now" |
    ghe-ssh "$GHE_HOSTNAME" -- /bin/sh 1>&3
fi

# When restoring to a host that has already been configured, kick off a
# config run to perform data migrations.
if $CLUSTER; then
  log_info "Configuring cluster ..."
  bm_start "$(basename $0) - configure cluster"
  # Clean up stale nomad allocations before applying configuration.
  if [ "$GHE_VERSION_MAJOR" -eq "3" ]; then
    ghe-ssh "$GHE_HOSTNAME" -- "ghe-cluster-nomad-cleanup" 1>&3 2>&3
  elif [ "$GHE_VERSION_MAJOR" -eq "2" ] && [ "$GHE_VERSION_MINOR" -eq "22" ]; then
    ghe-ssh "$GHE_HOSTNAME" -- "ghe-cluster-each -- /usr/local/share/enterprise/ghe-nomad-cleanup" 1>&3 2>&3
  fi
  ghe-ssh "$GHE_HOSTNAME" -- "ghe-cluster-config-apply" 1>&3 2>&3
  bm_end "$(basename $0) - configure cluster"
elif $instance_configured; then
  log_info "Configuring appliance ..."
  bm_start "$(basename $0) - configure appliance"
  if [ "$GHE_VERSION_MAJOR" -eq "3" ]; then
    ghe-ssh "$GHE_HOSTNAME" -- "ghe-nomad-cleanup" 1>&3 2>&3
  elif [ "$GHE_VERSION_MAJOR" -eq "2" ] && [ "$GHE_VERSION_MINOR" -eq "22" ]; then
    ghe-ssh "$GHE_HOSTNAME" -- "/usr/local/share/enterprise/ghe-nomad-cleanup" 1>&3 2>&3
  fi
  ghe-ssh "$GHE_HOSTNAME" -- "ghe-config-apply" 1>&3 2>&3
  bm_end "$(basename $0) - configure appliance"
fi

# Clear GitHub Connect settings stored in the restored database.
# This needs to happen after `ghe-config-apply` to ensure all migrations have run.
if ! $RESTORE_SETTINGS; then
  log_info "Clearing GitHub Connect settings ..." 1>&3
  echo "if [ -f /usr/local/share/enterprise/ghe-reset-gh-connect ]; then /usr/local/share/enterprise/ghe-reset-gh-connect -y; fi" |
    ghe-ssh "$GHE_HOSTNAME" -- /bin/sh 1>&3
fi

# Start cron. Timerd will start automatically as part of the config run.
start_cron
CRON_RUNNING=true

# Clean up all stale replicas on configured instances.
# Lists git-server UUIDs on the appliance, drops the one we just restored, and
# tears down the rest via cleanup_cluster_nodes (shipped to the remote host).
if ! $CLUSTER && $instance_configured; then
  log_info "Cleaning up replicas..." 1>&3
  bm_start "$(basename $0) - Cleanup replicas"
  restored_uuid=$(cat "$GHE_RESTORE_SNAPSHOT_PATH/uuid")
  other_nodes=$(echo "
set -o pipefail; \
ghe-spokes server show --json \
| jq -r '.[] | select(.host | contains(\"git-server\")).host' \
| sed 's/^git-server-//g' \
| ( grep -F -x -v \"$restored_uuid\" || true )" \
  | ghe-ssh "$GHE_HOSTNAME" -- /bin/bash)
  if [ -n "$other_nodes" ]; then
    log_info "Cleaning up stale nodes ..."
    for uuid in $other_nodes; do
      # shellcheck disable=SC2034
      echo "set -o pipefail; $(typeset -f cleanup_cluster_nodes); cleanup_cluster_nodes $uuid" | ghe-ssh "$GHE_HOSTNAME" 1>&3
    done
  fi
  bm_end "$(basename $0) - Cleanup replicas"
fi

# Update the remote status to "complete". This has to happen before importing
# ssh host keys because subsequent commands will fail due to the host key
# changing otherwise.
trap "cleanup" EXIT
update_restore_status "complete"

# Log restore complete message in /var/log/syslog on remote instance
ghe_remote_logger "Completed restore from $(hostname) / snapshot ${GHE_RESTORE_SNAPSHOT}."

if ! $CLUSTER; then
  log_info "Restoring SSH host keys ..."
  ghe-ssh "$GHE_HOSTNAME" -- 'ghe-import-ssh-host-keys' < "$GHE_RESTORE_SNAPSHOT_PATH/ssh-host-keys.tar" 1>&3
else
  # This will make sure that Git over SSH host keys (babeld) are
  # copied to all the cluster nodes so babeld uses the same keys.
  log_info "Restoring Git over SSH host keys ..."
  ghe-ssh "$GHE_HOSTNAME" -- "sudo tar -xpf - -C $GHE_REMOTE_DATA_USER_DIR/common" < "$GHE_RESTORE_SNAPSHOT_PATH/ssh-host-keys.tar" 1>&3
  ghe-ssh "$GHE_HOSTNAME" -- "sudo chown babeld:babeld $GHE_REMOTE_DATA_USER_DIR/common/ssh_host_*" 1>&3
  echo "if [ -f /usr/local/share/enterprise/ghe-cluster-config-update ]; then /usr/local/share/enterprise/ghe-cluster-config-update -s; else ghe-cluster-config-update -s; fi" |
    ghe-ssh "$GHE_HOSTNAME" -- /bin/sh 1>&3
fi

END_TIME=$(date +%s)
log_info "Runtime: $((END_TIME - START_TIME)) seconds"
log_info "Completed restore of $GHE_HOSTNAME from snapshot $GHE_RESTORE_SNAPSHOT at $(date +"%H:%M:%S")"

log_info "Restore of $GHE_HOSTNAME finished."

if ! $instance_configured; then
  echo "To complete the restore process, please visit https://$hostname/setup/settings to review and save the appliance configuration."
fi

666
debian/changelog поставляемый
Просмотреть файл

@ -1,666 +0,0 @@
github-backup-utils (3.11.0) UNRELEASED; urgency=medium
* `ghe-backup-mysql` and `ghe-restore-mysql` will now exit 1 on errors.
* On an instance with Actions enabled, incorrect backup and restore settings prevented the storage container name from being restored. This made the logs from that container inaccessible, and caused Actions to create a new storage container in a different location.
* When backups are run for HA instances in both primary and replica nodes a `.sync-in-progress` file will be created. This will disable `NetworkMaintenance` jobs from running and queueing up when backups are running from the primary node.
* Estimated transfer sizes will be calculated on appropriate nodes for clustered environments.
* Added support for finding the `parallel` command from the `moreutils` tool suite on more Linux distributions, including Arch Linux and Alpine Linux.
* `ghe-restore` avoids unnecessary `rsync` operations when restoring to non-clustered environments.
* `ghe-backup` and `ghe-restore` output their total runtime
* `rsync` compression is now disabled by default. The `-z` flag has been removed from the `ghe-rsync` command in multiple files to improve transfer speed and reduce CPU usage. To enable `rsync` compression globally, add `GHE_RSYNC_COMPRESSION_ENABLED=yes` to the `backup.config` file.
* Updates the Host OS version output to use `/etc/os-release` for better compatibility with other Linux distributions.
* When a NFS mount is detected for snapshots on backup hosts, backup logs will show a warning to notify the user that such a setup may incur performance issues as highlighted in [storage requirements](https://github.com/github/backup-utils-private/blob/master/docs/requirements.md#storage-requirements) documentation.
-- Andrew Mildahl <amildahl@github.com> Thu, 30 Nov 2023 01:57:12 +0000
github-backup-utils (3.10.0) UNRELEASED; urgency=medium
* Remove -o option from ps use #341
* Switch to TMPDIR before initiating SSH multiplexing workaround to prevent locking the destination filesystem #348
* Move check for git for ssh muxing into ghe-ssh #378
* Check filesystem supports hardlinks #388
* Remove check for git from ghe-ssh #393
* Clean up stale HA nodes on restore #396
-- Balwinder Sohi <bonsohi@github.com> Wed, 09 Aug 2023 19:37:10 +0000
github-backup-utils (3.9.0) UNRELEASED; urgency=medium
* Set restore status on all cluster nodes #274
* Fix pages backups and restores in GitHub Enterprise 11.10 #275
* Backup and restore custom CA certificates #281
* Set the benchmark file path consistently #283
* Suppress dd output noise #289
* Track completeness of Elasticsearch JSON dumps #298
* Use existing Elasticsearch indices to speed up transfer during a restore #310
* Include the user data directory in the benchmark name #311
* Use calculated routes when backing up storage data from a cluster #318
* Refresh the existing indices when restoring Elasticsearch indices to cluster #328
* Use git to generate short name for SSH multiplex control path #335
-- Junior Eluhu <jeluhu@github.com> Mon, 12 Jun 2023 20:46:10 +0000
github-backup-utils (3.8.0) focal; urgency=medium
-- Daniel Johnson <wirecat@github.com> Tue, 07 Feb 2023 21:43:26 +0000
github-backup-utils (3.7.0) UNRELEASED; urgency=medium
-- Devin Dooley <dooleydevin@github.com> Tue, 25 Oct 2022 00:35:38 +0000
github-backup-utils (3.6.0) UNRELEASED; urgency=medium
-- Joe Franks <joefranks1993@github.com> Wed, 17 Aug 2022 19:20:54 +0000
github-backup-utils (3.5.0) UNRELEASED; urgency=medium
* Simplify complex redirects for ghe-rsync #881
* On restore failure, restart cron on target host #883
-- Bon Sohi <bonsohi@github.com> Thu, 12 May 2022 21:06:56 +0000
github-backup-utils (3.4.1) UNRELEASED; urgency=medium
* Simplify complex redirects for ghe-rsync #881
* On restore failure, restart cron on target host #883
-- Donal Ellis <donal@github.com> Fri, 22 Apr 2022 04:00:00 +0000
github-backup-utils (3.3.2) UNRELEASED; urgency=medium
* Simplify complex redirects for ghe-rsync #881
* On restore failure, restart cron on target host #883
-- Donal Ellis <donal@github.com> Fri, 22 Apr 2022 00:53:45 +0000
github-backup-utils (3.4.0) UNRELEASED; urgency=medium
* Add anchor to usage doc for settings restore #865
-- Steve Culver <steveculver@github.com> Tue, 15 Feb 2022 19:25:09 +0000
github-backup-utils (3.3.1) UNRELEASED; urgency=medium
* Fix compat issue with ghe-actions-start during maintenance mode #836
-- Balwinder Sohi <bonsohi@github.com> Tue, 21 Dec 2021 23:38:01 +0000
github-backup-utils (3.3.0) UNRELEASED; urgency=medium
-- Balwinder Sohi <bonsohi@github.com> Wed, 08 Dec 2021 03:12:53 +0000
github-backup-utils (3.3.0.rc1) UNRELEASED; urgency=medium
-- Nick Iodice <nick-iodice@github.com> Tue, 09 Nov 2021 19:56:08 +0000
github-backup-utils (3.2.0) UNRELEASED; urgency=medium
-- Brett Westover <bwestover@github.com> Tue, 28 Sep 2021 16:50:00 +0000
github-backup-utils (3.2.0.rc3) UNRELEASED; urgency=medium
* Move GitHub Connect reset to after ghe-config-apply #783
-- Brett Westover <bwestover@github.com> Fri, 17 Sep 2021 00:44:59 +0000
github-backup-utils (3.2.0.rc1) UNRELEASED; urgency=medium
* Leaked host key check: Avoid false positives from FIPS mode #748
* Always restore user-password-secrets #762
* Make some of the actions setting best effort #767
* Remove reference to `ghe-cluster-cleanup-nodes` #768
* Stop/Restart Actions in ghe-restore #769
* Clear GitHub Connect settings when not restoring settings #770
* GitHub Connect Reset Issue #776
-- Brett Westover <bwestover@github.com> Fri, 10 Sep 2021 20:10:07 +0000
github-backup-utils (3.2.0) UNRELEASED; urgency=medium
* Leaked host key check: Avoid false positives from FIPS mode #748
* Always restore user-password-secrets #762
* Make some of the actions setting best effort #767
* Remove reference to `ghe-cluster-cleanup-nodes` #768
* Stop/Restart Actions in ghe-restore #769
* Clear GitHub Connect settings when not restoring settings #770
-- Brett Westover <bwestover@github.com> Thu, 09 Sep 2021 16:42:24 +0000
github-backup-utils (3.1.0) UNRELEASED; urgency=medium
-- Zachary Mark <zachary-mark@github.com> Thu, 03 Jun 2021 16:55:16 +0000
github-backup-utils (3.1.0~rc1) UNRELEASED; urgency=medium
[ Zachary Mark ]
* Update repository backups to use ghe-gc-* for lock file management #188
* A faster way to restore storage blobs (clusters) #212
* Bug fix: Be more specific in restore routes globbing #715
* fix(docker): add coreutils to get flags for date #717
* Add backup cadence variable to the appliance #719
* Fix is_default_external_database_snapshot function #720
[ Hideki Yamane ]
* debian/rules
- Drop unnecessary build-indep:
- Add manpages generation.
* debian/changelog
- Fix some lintian warnings for old entries.
* debian/source/format
- Upgrade to newer source format 3.0 (native).
* debian/copyright
- Format it as Machine-readable debian/copyright format 1.0
- Add entries Twan Wolthof <xeago@spotify.com> and me for debian/* since
enough code was written by non-github.com employees, at least.
* debian/control
- Update descriptions in debian/control
- Fix "Architecture: all", instead of "any".
- Declare its Homepage as https://github.com/github/backup-utils
- Add Vcs-* metadata field.
- Specify "Rules-Requires-Root: no"
- Drop unnecessary "Build-Depends: devscripts"
- Set Standards-Version: 4.5.1
- Update Maintainer as Zachary Mark <zachary-mark@github.com>
* debian/{clean,manpages}
- Add files to support manpages handles.
-- Zachary Mark <zachary-mark@github.com> Thu, 06 May 2021 17:11:18 +0000
github-backup-utils (3.0.0) UNRELEASED; urgency=medium
* Fix restoring the password pepper for already configured instances #683
-- Michael Dang <dangmh@github.com> Tue, 16 Feb 2021 22:32:25 +0000
github-backup-utils (3.0.0.rc1) UNRELEASED; urgency=medium
* Cleanup nomad container when restore for 2.22 #670
* Use ghe-cluster-nomad-cleanup for cluster mode #663
* Only run ghe-nomad-cleanup in 3.0 #662
* Revert backup-utils gitHub env and a few more fixes #661
* Note how to test filesystem symlink / hardlink support #660
* stop github-timerd based on its running environment #659
* Backup and restore password pepper #656
* github-env -> github-env-dispatch #654
* Rename redis-cli to ghe-redis-cli #639
-- Michael Dang <dangmh@github.com> Thu, 14 Jan 2021 21:17:53 +0000
github-backup-utils (2.22.0) UNRELEASED; urgency=medium
* Added basic timing around the ghe-restore process #625
* Improve python3 & finding moreutils parallel #627
* Turn off POSIX for ghe-backup-config #632
* Add parallelized restore capability to ghe-restore-storage #635
* Update backup-utils for new features in 2.22 #641
-- Hao Jiang <jianghao0718@github.com> Wed, 23 Sep 2020 15:48:54 +0000
github-backup-utils (2.21.0) UNRELEASED; urgency=medium
* Introduce option to skip restoring of audit logs #596
* Beta: Execute ghe-backup tasks in parallel #597
* Beta: Execute ghe-restore tasks in parallel #601
* Run repositories restore in parallel #603
* Fix mismatched `bm_start` and `bm_end` commands #607
* remove rsync restore method used by GHES versions prior to 2.13 #608
* Output MySQL backup strategy for clarity during backup and restore #610
-- Hao Jiang <jianghao0718@github.com> Tue, 09 Jun 2020 17:59:06 +0000
github-backup-utils (2.19.5) UNRELEASED; urgency=medium
* In legacy mode we should use ghe-import-mysql #581
-- Caine Jette <cainejette@github.com> Fri, 21 Feb 2020 19:13:17 +0000
github-backup-utils (2.20.2) UNRELEASED; urgency=medium
* In legacy mode we should use ghe-import-mysql #581
-- Hao Jiang <jianghao0718@github.com> Thu, 20 Feb 2020 22:47:09 +0000
github-backup-utils (2.20.1) UNRELEASED; urgency=medium
* Fixes gist route calculation for GHES version 2.20
-- Caine Jette <cainejette@github.com> Wed, 19 Feb 2020 21:47:18 +0000
github-backup-utils (2.19.4) UNRELEASED; urgency=medium
* Fix the way we connect to mysql master using ssh forwarding for binary backups #567
-- Hao Jiang <jianghao0718@github.com> Tue, 18 Feb 2020 17:54:31 +0000
github-backup-utils (2.20.0) UNRELEASED; urgency=medium
* Fix `ghe-backup-repositories` performance for large instances #541
* Fix two issues with binary backup (slow gzip compression and support for restore from instances other than SQL master) #551
-- Alejandro Figueroa <thejandroman@github.com> Tue, 11 Feb 2020 20:47:17 +0000
github-backup-utils (2.19.3) UNRELEASED; urgency=medium
* Fix two issues with binary backup (slow gzip compression and support for restore from instances other than SQL master) #551
-- Hao Jiang <jianghao0718@github.com> Tue, 11 Feb 2020 19:31:55 +0000
github-backup-utils (2.19.2) UNRELEASED; urgency=medium
* Fix `ghe-backup-repositories` performance for large instances #541
-- Evgenii Khramkov <ewgenius@github.com> Fri, 31 Jan 2020 19:13:46 +0000
github-backup-utils (2.19.1) UNRELEASED; urgency=medium
* Cater for more explicit gist paths used in routes file #524
* Suppress "*.rsync': No such file or directory" errors when no data to backup or restore #525
-- Colin Seymour <colin@github.com> Wed, 11 Dec 2019 09:33:01 +0000
github-backup-utils (2.19.0) UNRELEASED; urgency=medium
* Remove temporary exclude file from the backup host not the target GHES appliance #516
* Update Debian package depends to include git #520
-- Colin Seymour <colin@github.com> Tue, 12 Nov 2019 18:57:12 +0000
github-backup-utils (2.18.0) UNRELEASED; urgency=medium
* Replace "sed -E" in ghe-host-check with a more portable solution #509
-- Colin Seymour <colin@github.com> Tue, 20 Aug 2019 18:49:44 +0000
github-backup-utils (2.17.1) UNRELEASED; urgency=medium
* Redirect ghe-export-audit-logs stderr output unless using verbose output #497
-- Colin Seymour <colin@github.com> Wed, 05 Jun 2019 08:43:25 +0000
github-backup-utils (2.17.0) UNRELEASED; urgency=medium
* Restore target is ignored when specified on the command line #476
* Support incremental backups for MySQL-stored audit logs #485
-- Colin Seymour <colin@github.com> Thu, 23 May 2019 08:20:15 +0000
github-backup-utils (2.16.1) UNRELEASED; urgency=medium
* Detect storage user on each cluster host being backed up or restored #472
-- Colin Seymour <colin@github.com> Tue, 26 Feb 2019 16:37:04 +0000
github-backup-utils (2.16.0) UNRELEASED; urgency=medium
* (There was no description)
-- Colin Seymour <colin@github.com> Tue, 22 Jan 2019 20:25:34 +0000
github-backup-utils (2.15.1) UNRELEASED; urgency=medium
* Restoring to an un-configured appliance fails due to a missing license file #449
-- Colin Seymour <colin@github.com> Tue, 13 Nov 2018 17:34:21 +0000
github-backup-utils (2.15.0) UNRELEASED; urgency=medium
* (There was no description)
-- Colin Seymour <colin@github.com> Tue, 16 Oct 2018 16:40:03 +0000
github-backup-utils (2.15.0) UNRELEASED; urgency=medium
* (There was no description)
-- Colin Seymour <colin@github.com> Tue, 16 Oct 2018 16:07:36 +0000
github-backup-utils (2.14.3) UNRELEASED; urgency=medium
* Improve multi-platform detection of simultaneous ghe-backup runs #435
-- Colin Seymour <colin@github.com> Tue, 11 Sep 2018 17:03:42 +0000
github-backup-utils (2.14.2) UNRELEASED; urgency=medium
* Capture and display repository/gist warnings during cluster restore #423
* Use remote tempdir when finalizing Pages routes #424
* Use old rsync restore method for pages prior to 2.13 #426
-- Colin Seymour <colin@github.com> Tue, 21 Aug 2018 13:57:20 +0000
github-backup-utils (2.14.1) UNRELEASED; urgency=medium
* Don't fail a backup if the Management Console password isn't set #416
* Fix permissions issues when repeat restoring to configured cluster instance #417
* Add missing dependencies to debian packaging #418
* Prevent restoring snapshots to older releases #420
-- Colin Seymour <colin@github.com> Tue, 07 Aug 2018 16:00:36 +0000
github-backup-utils (2.14.0) UNRELEASED; urgency=medium
* Disable pager and context when running git-diff #411
* Optimise hookshot and audit log backups and restores and MySQL restores #413
-- Colin Seymour <colin@github.com> Thu, 12 Jul 2018 15:11:11 +0000
github-backup-utils (2.13.2) UNRELEASED; urgency=medium
* Treat missing repository networks, gists, and storage objects as a non-critical error #386
* Clean up stale HA nodes on restore #396
* Cleanup all SSH muxes in a non blocking way #402
* Raise an error if the current symlink doesn't exist when attempting to restore it #404
-- Colin Seymour <colin@github.com> Fri, 22 Jun 2018 10:08:22 +0000
github-backup-utils (2.13.1) UNRELEASED; urgency=medium
* Retry with the admin ssh port on network unreachable too. #377
* Output backup utils version on backup and restore #384
* Check filesystem supports hardlinks #388
* Switch back to optimised restore route calculation #389
* Optionally log verbose output to a file instead of stdout #391
* Switch back to optimised backup route calculation #392
* Remove check for git from ghe-ssh #393
* Use remote's mktemp to create temp dir on remote host #395
-- Colin Seymour <colin@github.com> Wed, 09 May 2018 10:16:18 +0000
github-backup-utils (2.13.0) UNRELEASED; urgency=medium
* Unify the backup & restore process #375
-- Colin Seymour <colin@github.com> Tue, 27 Mar 2018 16:01:43 +0000
github-backup-utils (2.11.4) UNRELEASED; urgency=medium
* Move check for git for ssh muxing into ghe-ssh #378
* Make it clear the settings need to be applied after restoring to an unconfigured instance #381
-- Colin Seymour <colin@github.com> Tue, 27 Mar 2018 13:20:18 +0000
github-backup-utils (2.11.3) UNRELEASED; urgency=medium
* Update argument parsing and help/usage consistency #320
* Fix failures variable #353
* Remove other snapshot contents before removing the "incomplete" file #358
* Backup and restore the management console password #361
* Check for git before allowing SSH multiplex #362
* Cleanup SSH multiplexing on exit #363
* Filter cluster nodes by role during backup and restore #367
* Optimise route generation and finalisation during cluster restores of pages #369
* Allow extra rsync options to override default options #370
-- Colin Seymour <colin@github.com> Wed, 28 Feb 2018 16:32:07 +0000
github-backup-utils (2.11.2) UNRELEASED; urgency=medium
* Allow the restoration of configuration to Cluster #347
* Switch to TMPDIR before initiating SSH multiplexing workaround to prevent locking the destination filesystem #348
-- Colin Seymour <colin@github.com> Thu, 09 Nov 2017 12:16:23 +0000
github-backup-utils (2.11.1) UNRELEASED; urgency=medium
* Refresh the existing indices when restoring Elasticsearch indices to cluster #328
* Fix failure to restore 2.9/2.10 backups to 2.11 prevented by incorrect detection of the audit log migration #333
* Use git to generate short name for SSH multiplex control path #335
* Remove use of --literally when computing arbitrary shasum #338
* Remove -o option from ps use #341
-- Colin Seymour <colin@github.com> Thu, 05 Oct 2017 14:47:56 +0000
github-backup-utils (2.11.0) UNRELEASED; urgency=medium
* Use calculated routes when backing up storage data from a cluster #318
* Add SSH multiplexing support #321
* Optimise route generation and finalisation during cluster restores #322
* Prefer the SSH port specified on the command line #324
-- Colin Seymour <colin@github.com> Wed, 13 Sep 2017 16:31:20 +0000
github-backup-utils (2.10.0) UNRELEASED; urgency=medium
* Include the user data directory in the benchmark name #311
* Use existing Elasticsearch indices to speed up transfer during a restore #310
* Improve detection of failures in cluster backup rsync threads #301
* Improve redis backup robustness #306
* Use default niceness for restores #308
* Add additional case to SSH port detection logic #304
* Suppress additional dd output noise #289
* Track completeness of Elasticsearch JSON dumps #298
-- Steven Honson <snh@github.com> Thu, 08 Jun 2017 09:06:16 +1000
github-backup-utils (2.9.0) UNRELEASED; urgency=medium
* Block restores to appliances with HA configured #291
* Add a `--version` flag #284
* Check backup-utils are not being run on GitHub Enterprise host #286
* Backup and restore custom CA certificates #281
* Hookshot backup/restores optimisations #276
-- Sergio Rubio <rubiojr@github.com> Wed, 01 Mar 2017 09:39:26 -0800
github-backup-utils (2.8.3) UNRELEASED; urgency=medium
* Set restore status on all cluster nodes #274
* Fix pages backups and restores in GitHub Enterprise 11.10 #275
-- Steven Honson <snh@github.com> Wed, 21 Dec 2016 21:01:20 +1100
github-backup-utils (2.8.2) UNRELEASED; urgency=medium
* Backup and restore the appliance UUID #272
-- Sergio Rubio <rubiojr@github.com> Thu, 17 Nov 2016 15:58:08 +0100
github-backup-utils (2.8.1) UNRELEASED; urgency=medium
* Stop cron and timerd during restore #269
* Fix compatibility issue with older versions of OpenSSH #263
-- Sergio Rubio <rubiojr@github.com> Mon, 14 Nov 2016 22:04:48 +0100
github-backup-utils (2.8.0) UNRELEASED; urgency=low
* Adds support for GitHub Enterprise 2.8.0
* Speedup storage restores #247
* More portable backup-utils #260
-- rubiojr <rubiojr@github.com> Wed, 09 Nov 2016 06:35:21 -0800
github-backup-utils (2.7.1) UNRELEASED; urgency=medium
* Cluster: fix offline cluster node detection #250
* Detect leaked ssh keys in backup snapshots #253
-- Sergio Rubio <rubiojr@github.com> Tue, 20 Sep 2016 20:15:12 +0200
github-backup-utils (2.7.0) UNRELEASED; urgency=medium
* GitHub Enterprise 2.7.0 support
-- Sergio Rubio <rubiojr@github.com> Wed, 03 Aug 2016 20:25:31 +0200
github-backup-utils (2.6.4) UNRELEASED; urgency=medium
* Instrument/benchmark backups #238
* Cluster: remove restoring cluster.conf on restore #242
* Cluster: Prevent restoring to a standalone GHE appliance #244
-- Sergio Rubio <rubiojr@github.com> Wed, 27 Jul 2016 19:15:53 +0200
github-backup-utils (2.6.3) UNRELEASED; urgency=medium
* Cluster: git-hooks backup fixes #235
-- Sergio Rubio <rubiojr@github.com> Wed, 29 Jun 2016 21:05:21 +0200
github-backup-utils (2.6.2) UNRELEASED; urgency=medium
* git-hooks fixes #231
* Cluster: speedup repositories restore #232 (requires GitHub Enterprise
2.6.4)
* Cluster: restore Git over SSH keys #230
* Benchmark restores #219
-- Sergio Rubio <rubiojr@github.com> Wed, 22 Jun 2016 19:36:06 +0200
github-backup-utils (2.6.1) UNRELEASED; urgency=medium
* Cluster: faster gist restores #220
* Cluster: faster storage restores #212
* Cluster: fix git-hooks restores #204
-- Sergio Rubio <rubiojr@github.com> Tue, 31 May 2016 20:54:11 +0200
github-backup-utils (2.6.0) UNRELEASED; urgency=medium
* Adds support for GitHub Enterprise 2.6
* Adds an extra supported location for the backup configuration #197
* New config option to check for corrupted repositories after the backup #195
* General improvements and bug fixes
-- Sergio Rubio <rubiojr@github.com> Tue, 26 Apr 2016 18:03:01 +0200
github-backup-utils (2.5.2) UNRELEASED; urgency=medium
* New configuration variable: GHE_CREATE_DATA_DIR #186
* Require that snapshots originated from an instance running GitHub
Enterprise 2.5.0 or above when restoring to a cluster #182
* Prevent Git GC operations and some other maintenance jobs from running
while repositories are being restored #179
* Fix Solaris and SmartOS support, using Bash everywhere #177
-- Sergio Rubio <rubiojr@github.com> Wed, 30 Mar 2016 14:32:15 +0200
github-backup-utils (2.5.1) UNRELEASED; urgency=medium
* Fixes for cluster restores #173
* Fix Elasticsearch backups for GitHub Enterprise <= 2.2 #175
* Removed experimental S3 support #167
* Remote logging output fixes #170
* Update ghe-host-check to detect extra port 22 error #162
-- Sergio Rubio <rubiojr@github.com> Wed, 09 Mar 2016 14:44:05 +0100
github-backup-utils (2.5.0) UNRELEASED; urgency=medium
* Adds GitHub Enterprise 2.5 support
* Adds GitHub Enterprise Clustering support
* Backups and restores SAML keypairs
-- Sergio Rubio <rubiojr@github.com> Tue, 9 Feb 2016 00:02:37 +0000
github-backup-utils (2.4.0) UNRELEASED; urgency=medium
* Moves the in-progress detection to a separate file with PID which is
removed if the process is no longer running after the backup. #145, #99
* Updates the README to explain why backup-utils is useful even if you have
the high availability replica running. #140
* Changes the use of the --link-dest option to only occur when backing up
populated directories. #138
* Adds logging to /var/log/syslog on the remote GitHub Enterprise appliance
to both ghe-backup and ghe-restore. #131
* Restarts memcached after restoring to an already configured appliance to
ensure it doesn't contain out-of-sync information. #130
* Removes the temporary /data/user/repositories-nw-backup directory that
remains after successfully migrating the repository storage layout to the
new format used on GitHub Enterprise 2.2.0 and later after restoring a
backup from an older release of GitHub Enterprise. #129
* Add devscripts to Debian's build-depends for checkbashisms. #101
* Documents the -c option which forces the restoration of the configuration
information to an already configured appliance. #96
-- Colin Seymour <colin@github.com> Tue, 20 Oct 2015 00:02:37 +0000
github-backup-utils (2.2.0) UNRELEASED; urgency=medium
* Adds support for the new repositories filesystem layout include in
GitHub Enterprise v2.2. #122, #124
* ghe-restore now performs a config run on the instance after an incremental
restore to 11.10.x and 2.x instances. #100
* ghe-restore now fails fast when run against a GHE instance with replication
enabled. Replication should be disabled during a restore and then setup
after the restore completes. #121
* Fixes an issue with special port 122 detection failing when port is
overridden in an ssh config file. #102
* Removes a warning message when running ghe-backup against an instance with
GitHub Pages disabled. #117
* backup-utils release version numbers now track GitHub Enterprise releases
to ease the process of determining which version of backup-utils is
required for a given GitHub Enterprise version.
-- Ryan Tomayko <ryan@github.com> Wed, 29 Apr 2015 07:29:04 +0000
github-backup-utils (2.0.2) UNRELEASED; urgency=medium
* ghe-restore now requires that an already-configured appliance be put into
maintenance mode manually. This is a safeguard against accidentally
overwriting data on the wrong instance. #62, #84
* ghe-backup and ghe-restore now run a ghe-negotiate-version program on the
appliance to determine whether the backup-utils and GHE versions are
compatible. #91
* Various portability fixes for problems surfaced when running on Solaris
and FreeBSD. #86, #87
* Fixes an issue in ghe-backup where mysqldump failures weren't being
reported properly. #90
* Automated builds are now run on Travis CI. #77
-- Ryan Tomayko <ryan@github.com> Tue, 20 Jan 2015 16:00:00 +0000
github-backup-utils (2.0.1) UNRELEASED; urgency=medium
* Adds /etc/github-backup-utils/backup.config as a default config file search
location for deb / system installs.
* Enables SSH BatchMode for all remote command invocation except initial host
check / version identification.
* Fixes a bug in ghe-backup where Git GC process detection would misclassify
long-running server processes matching /git.*gc/, causing the backup operation
to timeout.
* Adds a note and link to the Migrating from GitHub Enterprise v11.10.34x to
v2.0 documentation in the README.
* Adds example / documentation for the GHE_EXTRA_SSH_OPTS config value to the
backup.config-example file.
-- Ryan Tomayko <ryan@github.com> Mon, 17 Nov 2014 12:47:22 +0000
github-backup-utils (2.0.0) UNRELEASED; urgency=medium
* Support for GitHub Enterprise 2.0.
* Support for migrating from GitHub Enterprise 11.10.34x to 2.0 (including from
VMware to AWS).
* ghe-backup retains hardlinks present on VM in backup snapshots, saving space.
* ghe-restore retains hardlinks present in backup snapshot when restoring to VM.
* backup-utils now includes debian packaging support.
* Fixes an issue with ghe-restore -s not using the snapshot specified.
* Fixes an issue with ghe-backup not waiting for nw-repack processes to finish
in some instances.
-- Ryan Tomayko <ryan@github.com> Mon, 10 Nov 2014 10:48:36 +0000
github-backup-utils (1.1.0) UNRELEASED; urgency=medium
* Updated documentation on minimum GitHub Enterprise version requirements for
online and incremental backups from v11.10.341 to at least v11.10.342.
* The ghe-restore command now prompts for confirmation of the host to restore to
before performing any destructive operation. This is to reduce the chances of
restoring to the wrong host. The prompt may be bypassed in automated scenarios
by providing the --force option.
* Added a -c option to ghe-restore for restoring base appliance settings in
addition to primary datastores. See ghe-restore --help for more information.
* Added a note about disabling maintenance mode on the appliance after a
successful ghe-restore operation.
* Added support for filesystem layout changes and upgraded server components in
future versions of GitHub Enterprise.
-- Twan Wolthof <xeago@spotify.com> Sat, 18 Oct 2014 19:14:47 +0000
github-backup-utils (1.0.1) UNRELEASED; urgency=medium
* Initial release.
-- Twan Wolthof <xeago@spotify.com> Tue, 23 Sep 2014 08:34:55 +0000

1
debian/clean поставляемый
Просмотреть файл

@ -1 +0,0 @@
debian/*.1

1
debian/compat поставляемый
Просмотреть файл

@ -1 +0,0 @@
9

32
debian/control поставляемый
Просмотреть файл

@ -1,32 +0,0 @@
Source: github-backup-utils
Maintainer: Zachary Mark <zachary-mark@github.com>
Section: misc
Priority: optional
Standards-Version: 4.5.1
Build-Depends: debhelper (>= 9), git, moreutils, jq, rsync (>= 2.6.4), help2man,
Homepage: https://github.com/github/backup-utils
Vcs-Git: https://github.com/github/backup-utils.git
Vcs-Browser: https://github.com/github/backup-utils
Rules-Requires-Root: no
Package: github-backup-utils
Architecture: all
Depends: ${misc:Depends}, rsync (>= 2.6.4), moreutils, jq, git
Description: Backup and recovery utilities for GitHub Enterprise Server
The backup utilities implement a number of advanced capabilities for backup
hosts, built on top of the backup and restore features already included in
GitHub Enterprise Server.
.
These advanced features include:
- Complete GitHub Enterprise Server backup and recovery system via two simple
utilities: `ghe-backup` and `ghe-restore`.
- Online backups. The GitHub appliance need not be put in maintenance mode for
the duration of the backup run.
- Incremental backup of Git repository data. Only changes since the last
snapshot are transferred, leading to faster backup runs and lower network
bandwidth and machine utilization.
- Efficient snapshot storage. Only data added since the previous snapshot
consumes new space on the backup host.
- Multiple backup snapshots with configurable retention periods.
- Backup runs under the lowest CPU/IO priority on the GitHub appliance,
reducing performance impact while backups are in progress.

33
debian/copyright поставляемый
Просмотреть файл

@ -1,33 +0,0 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: GitHub Enterprise Server Backup Utilities
Source: https://github.com/github/backup-utils
Files: *
Copyright: 2014-2021, GitHub Inc.
License: MIT
Files: debian/*
Copyright: 2014-2021, GitHub Inc.
2014 Twan Wolthof <xeago@spotify.com>
2021 Hideki Yamane <h-yamane@sios.com>
License: MIT
License: MIT
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

2
debian/install поставляемый
Просмотреть файл

@ -1,2 +0,0 @@
bin/* usr/bin
share/* usr/share

1
debian/manpages поставляемый
Просмотреть файл

@ -1 +0,0 @@
debian/*.1

18
debian/rules поставляемый
Просмотреть файл

@ -1,18 +0,0 @@
#!/usr/bin/make -f
# debian/rules for github-backup-utils: builds the package with debhelper and
# generates man pages for the three user-facing tools from their --help output.

# Recipe-time shell substitution ($$ escapes the dollar so the shell, not make,
# expands it): the version file is read when the recipe runs, not at parse time.
VERSION=$$(cat $(CURDIR)/share/github-backup-utils/version)

override_dh_auto_build:
# Generate man pages for ghe-backup, ghe-host-check and ghe-restore.
# Fix: the -n descriptions for ghe-host-check and ghe-restore were swapped;
# each description now matches the tool it documents.
	help2man $(CURDIR)/bin/ghe-backup -N -o $(CURDIR)/debian/ghe-backup.1 \
		-n "Take snapshots of all GitHub Enterprise data" \
		--version-string="ghe-backup $(VERSION)"
	help2man $(CURDIR)/bin/ghe-host-check -N -o $(CURDIR)/debian/ghe-host-check.1 \
		-n "Verify connectivity with the GitHub Enterprise Server host" \
		--version-string="ghe-host-check $(VERSION)"
	help2man $(CURDIR)/bin/ghe-restore -N -o $(CURDIR)/debian/ghe-restore.1 \
		-n "Restores a GitHub instance from local backup snapshots" \
		--version-string="ghe-restore $(VERSION)"

# Delegate all other debhelper targets to dh.
%:
	dh $@

1
debian/source/format поставляемый
Просмотреть файл

@ -1 +0,0 @@
3.0 (native)

Просмотреть файл

@ -1,29 +0,0 @@
---
version: 1
ownership:
- name: ghes-backup-utilities
long_name: GHES Backup Utilities
description: GitHub Enterprise Disaster Recovery Solution
kind: logical
repo: https://github.com/github/backup-utils-private
qos: best_effort
team_slack: ghes-lifecycle-aor
team: github/ghes-lifecycle
maintainer: whitneyimura
exec_sponsor: jakuboleksy
tier: 3
product_manager: davidjarzebowski
sev1:
slack: ghes-on-call
alert_slack: ghes-backup-utils
pagerduty: https://github.pagerduty.com/escalation_policies#PBQWK20
tta: 30 minutes
sev2:
issue: https://github.com/github/ghes/issues/new
tta: 1 business day
sev3:
issue: https://github.com/github/ghes/issues
tta: 1 week
support_squad:
slack: support-squad-infrastructure
issue: https://github.com/github/support-squad-infrastructure/issues

Просмотреть файл

@ -1,18 +0,0 @@
### Bug Fixes
* `ghe-backup-mysql` and `ghe-restore-mysql` will now exit 1 on errors.
* On an instance with Actions enabled, incorrect backup and restore settings prevented the storage container name from being restored. This made the logs from that container inaccessible, and caused Actions to create a new storage container in a different location.
* When backups are run for HA instances, a `.sync-in-progress` file will be created on both primary and replica nodes. This prevents `NetworkMaintenance` jobs from running and queueing up while backups are running from the primary node.
### Changes
* Estimated transfer sizes will be calculated on appropriate nodes for clustered environments.
* Added support for finding the `parallel` command from the `moreutils` tool suite on more Linux distributions, including Arch Linux and Alpine Linux.
* `ghe-restore` avoids unnecessary `rsync` operations when restoring to non-clustered environments.
* `ghe-backup` and `ghe-restore` output their total runtime
* `rsync` compression is now disabled by default. The `-z` flag has been removed from the `ghe-rsync` command in multiple files to improve transfer speed and reduce CPU usage. To enable `rsync` compression globally, add `GHE_RSYNC_COMPRESSION_ENABLED=yes` to the `backup.config` file.
* Updates the Host OS version output to use `/etc/os-release` for better compatibility with other Linux distributions.
### Backups and Disaster Recovery
* When a NFS mount is detected for snapshots on backup hosts, backup logs will show a warning to notify the user that such a setup may incur performance issues as highlighted in [storage requirements](https://github.com/github/backup-utils-private/blob/master/docs/requirements.md#storage-requirements) documentation.

Просмотреть файл

@ -1,42 +0,0 @@
#!/usr/bin/env bash
# Usage: script/package-deb
# Script to build a deb release package from the current HEAD version.
# The package version comes from the debian/changelog file so that should
# be edited before running this.
set -e
# Change into project root
cd "$(dirname "$0")"/..
# Fetch tags from remote repository
git fetch --tags
# Basic package name and version.
PKG_BASE="github-backup-utils"
PKG_VERS="$(git describe --tags)"
PKG_NAME="${PKG_BASE}-${PKG_VERS}"
PKG_HEAD="$(git rev-parse HEAD)"
# Run git-archive to generate tarball
# The scratch dir is removed on any exit (trap) so failed builds don't
# leave stale state behind.
rm -rf dist/debuild
trap "rm -rf dist/debuild" EXIT
mkdir -p dist/debuild
# Build inside a local clone so the working tree is never mutated.
distdir="$(pwd)/dist/debuild/$PKG_NAME"
git clone -q . "$distdir"
cd "$distdir"
# Strip files that must not ship in the release package.
echo "Removing files listed in .releaseignore ..."
while IFS= read -r line; do
rm -rf "$line"
done < .releaseignore
echo "Removing .releaseignore ..."
rm -f .releaseignore
# NOTE(review): checking out $PKG_HEAD after the removals appears intended
# to pin the clone to the exact released commit — confirm it does not
# restore the files deleted above.
git checkout -q "$PKG_HEAD"
# debuild output goes to stderr so stdout stays a clean file listing.
debuild -uc -us 1>&2
cd ..
# $files is intentionally unquoted below: it holds several whitespace-
# separated artifact names produced by debuild.
files=$(ls -1 *.deb *.tar.xz *.dsc *.changes)
mv $files ../
for f in $files; do echo "dist/$f"; done

Просмотреть файл

@ -1,42 +0,0 @@
#!/usr/bin/env bash
# Usage: script/package-tarball
# Script to build a tarball release package from the current HEAD version.
# The package version comes from `git-describe --tags' so the release tag should
# be in place before this command is run.
set -e
# Change into project root
cd "$(dirname "$0")"/..
# Fetch tags from remote repository
git fetch --tags
# Basic package name and version.
PKG_BASE="github-backup-utils"
PKG_VERS="$(git describe --tags)"
PKG_NAME="${PKG_BASE}-${PKG_VERS}"
# Remove all files or directories listed in .releaseignore
# NOTE(review): these removals delete files from the real working tree, but
# `git archive HEAD` below packages the committed tree, not the working
# tree — confirm whether the removals are expected to affect the tarball.
echo "Removing files listed in .releaseignore ..."
while IFS= read -r line; do
rm -rf "$line"
done < .releaseignore
# Remove the .releaseignore file itself
echo "Removing .releaseignore ..."
rm -f .releaseignore
# Run git-archive to generate tarball; every path is prefixed with
# "$PKG_NAME/" so it unpacks into a versioned directory.
echo "Creating ${PKG_NAME}.tar.gz ..."
mkdir -p dist
git archive \
--format=tar.gz \
--prefix="$PKG_NAME/" \
--output="dist/${PKG_NAME}.tar.gz" \
HEAD
# List archive contents for review
gzip -dc < "dist/${PKG_NAME}.tar.gz" | tar tf -
# Output location
echo "Package dist/${PKG_NAME}.tar.gz OK"

Просмотреть файл

@ -1,506 +0,0 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
#/ Usage: release [--dry-run] [--skip-version-bump-check] <version> [min_version]
#/
#/ Publish a backup-utils release:
#/ * Updates the package changelog
#/ * Bumps the backup-utils version if required
#/ * Creates the release pull request
#/ * Merges the release pull request
#/ * Creates the release draft
#/ * Tags the release
#/ * Builds the release assets and uploads them
#/
#/ Notes:
#/ * Needs GH_RELEASE_TOKEN and GH_AUTHOR set in the environment.
#/ * Export GH_OWNER and GH_REPO if you want to use a different owner/repo
#/ * Only pull requests labeled with bug, feature or enhancement will show up in the
#/ release page and the changelog.
#/ * If this is a X.Y.0 release, a minimum supported version needs to be supplied too.
#/
require "json"
require "net/http"
require "time"
require "erb"
require "English"
API_HOST = ENV["GH_HOST"] || "api.github.com"
API_PORT = 443
GH_REPO = ENV["GH_REPO"] || "backup-utils"
GH_OWNER = ENV["GH_OWNER"] || "github"
GH_AUTHOR = ENV["GH_AUTHOR"]
DEB_PKG_NAME = "github-backup-utils"
GH_BASE_BRANCH = ENV["GH_BASE_BRANCH"] || "master" # TODO: should we even allow a default or require all params get set explicitly?
GH_STABLE_BRANCH = ""
# If PUBLISH is false, we leave the release in a draft state to be manually published later through the UI
PUBLISH = ENV["PUBLISH"] == "true" || false
CHANGELOG_TMPL = "" '<%= package_name %> (<%= package_version %>) UNRELEASED; urgency=medium
<%- changes.each do |ch| -%>
* <%= ch.strip.chomp %>
<% end -%>
-- <%= GH_AUTHOR %> <%= Time.now.utc.strftime("%a, %d %b %Y %H:%M:%S %z") %>
' ""
# Override Kernel.warn so warnings can be silenced with --no-warn.
def warn(msg)
Kernel.warn msg unless @no_warn
end
# Memoized Net::HTTP client (TLS). NOTE(review): because of the ||=
# memoization, later calls with a different host reuse the first client;
# attach_assets_to_release works around this by resetting @http to nil.
def client(host = API_HOST, port = API_PORT)
@http ||= begin
c = Net::HTTP.new(host, port)
c.use_ssl = true
c
end
end
# Authenticated GET; returns the raw Net::HTTPResponse.
def get(path)
req = Net::HTTP::Get.new(path)
req["Authorization"] = "token #{release_token}"
client.request(req)
end
# Authenticated POST with a pre-serialized (JSON) body.
def post(path, body)
req = Net::HTTP::Post.new(path)
req["Authorization"] = "token #{release_token}"
req.body = body
client.request(req)
end
# Upload a release asset: tarballs get a tar+gzip content type, anything
# else is assumed to be a Debian package.
def post_file(path, body)
req = Net::HTTP::Post.new(path)
req["Authorization"] = "token #{release_token}"
req["Content-Type"] = path.match?(/.*\.tar\.gz$/) ? "application/tar+gzip" : "application/vnd.debian.binary-package"
req.body = body
client.request(req)
end
# Authenticated PUT with a pre-serialized body.
def put(path, body)
req = Net::HTTP::Put.new(path)
req["Authorization"] = "token #{release_token}"
req.body = body
client.request(req)
end
# Authenticated PATCH with a pre-serialized body.
def patch(path, body)
req = Net::HTTP::Patch.new(path)
req["Authorization"] = "token #{release_token}"
req.body = body
client.request(req)
end
# Returns the API token from GH_RELEASE_TOKEN; raises when unset.
def release_token
token = ENV["GH_RELEASE_TOKEN"]
raise "GH_RELEASE_TOKEN environment variable not set" if token.nil?
token
end
# Create a lightweight tag pointing at the given commit sha.
def tag(name, sha)
body = {
"ref": "refs/tags/#{name}",
"sha": sha,
}.to_json
res = post("/repos/#{GH_OWNER}/#{GH_REPO}/git/refs", body)
raise "Creating tag ref failed (#{res.code})" unless res.is_a? Net::HTTPSuccess
end
# True when the issue carries at least one release-noteworthy label
# (bug, feature or enhancement); false otherwise.
def bug_or_feature?(issue_hash)
  noteworthy = %w[bug feature enhancement]
  issue_hash["labels"].any? { |label| noteworthy.include?(label["name"]) }
end
# Fetch issue/PR metadata by number; raises when the API call fails.
def issue_from(issue)
res = get("/repos/#{GH_OWNER}/#{GH_REPO}/issues/#{issue}")
raise "Issue ##{issue} not found in #{GH_OWNER}/#{GH_REPO}" unless res.is_a? Net::HTTPSuccess
JSON.parse(res.body)
end
# Turn raw "Merge pull request #N ..." log lines into "Title #N" strings,
# keeping only issues labeled bug/feature/enhancement. API failures for a
# single issue are downgraded to warnings so one bad lookup doesn't abort
# the whole changelog.
def beautify_changes(changes)
out = []
changes.each do |chg|
next unless chg =~ /#(\d+)/
begin
issue = issue_from Regexp.last_match(1)
out << "#{issue["title"]} ##{Regexp.last_match(1)}" if bug_or_feature?(issue)
rescue => e
warn "Warning: #{e.message}"
end
end
out
end
# List merge-commit subjects between the stable and base branches, sorted
# by PR number (sort -t# -k2).
def changelog
puts "building changelog by comparing origin/#{GH_STABLE_BRANCH}...origin/#{GH_BASE_BRANCH}"
changes = `git log --pretty=oneline origin/#{GH_STABLE_BRANCH}...origin/#{GH_BASE_BRANCH} --reverse --grep "Merge pull request" | sort -t\# -k2`.lines.map(&:strip)
# NOTE(review): $CHILD_STATUS is a Process::Status, not an Integer —
# confirm this comparison actually detects git failures on the target
# Ruby version.
raise "Building the changelog failed" if $CHILD_STATUS != 0
changes
end
# Render the debian changelog stanza from CHANGELOG_TMPL.
# NOTE(review): the positional trim-mode argument to ERB.new is deprecated
# since Ruby 2.6 (use trim_mode: "-") — confirm the minimum Ruby version.
def build_changelog(changes, package_name, package_version)
ERB.new(CHANGELOG_TMPL, nil, "-").result(binding)
end
# Prepend a freshly rendered stanza to debian/changelog, writing to a
# temp file first and renaming so the update is atomic.
def update_changelog(changes, name, version, path = "debian/changelog")
raise "debian/changelog not found" unless File.exist?(path)
File.open("#{path}.new", "w") do |f|
f.puts build_changelog changes, name, version
f.puts(File.read(path))
end
File.rename("#{path}.new", path)
end
# Create a GitHub release (draft by default) for tag_name on branch;
# returns the parsed release object.
def create_release(tag_name, branch, rel_name, rel_body, draft = true)
body = {
'tag_name': tag_name,
'target_commitish': branch,
'name': rel_name,
'body': rel_body,
'draft': draft,
'prerelease': false,
}.to_json
res = post("/repos/#{GH_OWNER}/#{GH_REPO}/releases", body)
raise "Failed to create release (#{res.code})" unless res.is_a? Net::HTTPSuccess
JSON.parse(res.body)
end
# Flip a draft release to published.
def publish_release(release_id)
body = {
'draft': false,
}.to_json
res = patch("/repos/#{GH_OWNER}/#{GH_REPO}/releases/#{release_id}", body)
raise "Failed to update release (#{res.code})" unless res.is_a? Net::HTTPSuccess
end
# Fetch all releases for the repo as parsed JSON.
def list_releases
res = get("/repos/#{GH_OWNER}/#{GH_REPO}/releases")
raise "Failed to retrieve releases" unless res.is_a? Net::HTTPSuccess
JSON.parse(res.body)
end
# True when a release with the given tag name already exists.
def release_available?(tag_name)
return true if list_releases.find { |r| r["tag_name"] == tag_name }
false
end
# Write new_version to the version file (atomically, via .new + rename).
# Unless @skip_version_bump_check is set, refuses to move backwards.
# When min_version is given, also rewrites the supported minimum version
# in bin/ghe-host-check and the default remote version in test/testlib.sh.
def bump_version(new_version, min_version = nil, path = "share/github-backup-utils/version")
current_version = Gem::Version.new(File.read(path).strip.chomp)
if !@skip_version_bump_check && (Gem::Version.new(new_version) < current_version)
raise "New version should be newer than #{current_version}"
end
File.open("#{path}.new", "w") { |f| f.puts new_version }
File.rename("#{path}.new", path)
unless min_version.nil?
content = File.read("bin/ghe-host-check")
# NOTE(review): the pattern [0-9]\.[0-9]+\.0 only matches a single-digit
# major version — confirm this keeps working for 10.x and later.
new_content = content.gsub(/supported_minimum_version="[0-9]\.[0-9]+\.0"/, "supported_minimum_version=\"#{min_version}\"")
File.open("bin/ghe-host-check", "w") { |file| file.puts new_content }
content = File.read("test/testlib.sh")
# NOTE(review): testlib is rewritten with new_version, not min_version —
# confirm that asymmetry is intentional.
new_content = content.gsub(/GHE_TEST_REMOTE_VERSION:=[0-9]\.[0-9]+\.0/, "GHE_TEST_REMOTE_VERSION:=#{new_version}")
File.open("test/testlib.sh", "w") { |file| file.puts new_content }
end
end
# Create the release-<version> branch, commit the version-bump files, and
# push the branch to origin. Raises with the captured git output when any
# step fails.
#
# Bug fix: the previous code tested the backtick *output* (`unless (out =
# `...`)`), but Kernel#` always returns a String — a truthy value — so git
# failures were silently ignored. We now check the exit status via
# $CHILD_STATUS (English stdlib, required at the top of this file).
def push_release_branch(version)
  out = `git checkout --quiet -b release-#{version}`
  raise "Creating release branch failed:\n\n#{out}" unless $CHILD_STATUS.success?
  out = `git commit --quiet -m 'Bump version: #{version} [ci skip]' debian/changelog share/github-backup-utils/version bin/ghe-host-check test/testlib.sh script/test`
  raise "Error committing changelog and version:\n\n#{out}" unless $CHILD_STATUS.success?
  out = `git push --quiet origin release-#{version}`
  raise "Failed pushing the release branch:\n\n#{out}" unless $CHILD_STATUS.success?
end
# Fast-forward the stable branch to the base branch and push it.
# Failures here are non-fatal (warn, don't raise) because the release has
# already been published by the time this runs.
#
# Bug fix: the previous code tested the backtick output string, which is
# always truthy, so the warnings could never fire. We now check the exit
# status via $CHILD_STATUS (English stdlib, required at the top of file).
def update_stable_branch
  `git checkout --quiet #{GH_STABLE_BRANCH}`
  out = `git merge --quiet --ff-only origin/#{GH_BASE_BRANCH}`
  warn "Merging #{GH_BASE_BRANCH} into #{GH_STABLE_BRANCH} failed:\n\n#{out}" unless $CHILD_STATUS.success?
  out = `git push --quiet origin #{GH_STABLE_BRANCH}`
  warn "Failed pushing the #{GH_STABLE_BRANCH} branch:\n\n#{out}" unless $CHILD_STATUS.success?
end
# Open the release PR (release-<version> -> base branch); returns the
# parsed PR object.
def create_release_pr(version, release_body)
body = {
'title': "Bump version: #{version}",
'body': release_body,
'head': "release-#{version}",
'base': GH_BASE_BRANCH,
}.to_json
res = post("/repos/#{GH_OWNER}/#{GH_REPO}/pulls", body)
raise "Creating release PR failed (#{res.code})" unless res.is_a? Net::HTTPSuccess
JSON.parse(res.body)
end
# Merge the release PR at the given head sha; returns the merge result.
# NOTE(review): the pr_mergeable? return value is ignored here — it only
# serves to wait/raise, the actual merge is attempted regardless of the
# boolean result. Confirm intent.
def merge_pr(number, sha, version)
body = {
'commit_title': "Merge pull request ##{number} from github/release-#{version}",
'commit_message': "Bump version: #{version}",
'sha': sha,
'merge_method': "merge",
}.to_json
pr_mergeable? number
res = put("/repos/#{GH_OWNER}/#{GH_REPO}/pulls/#{number}/merge", body)
raise "Merging PR failed (#{res.code})" unless res.is_a? Net::HTTPSuccess
JSON.parse(res.body)
end
# Internal sentinel used to retry while GitHub computes mergeability.
class RetryError < StandardError
end
# Poll the PR's "mergeable" field (up to 5 tries, 1s apart) while GitHub
# computes it; raises after the retries are exhausted.
def pr_mergeable?(number)
begin
retries ||= 5
res = get("/repos/#{GH_OWNER}/#{GH_REPO}/pulls/#{number}")
raise RetryError if JSON.parse(res.body)["mergeable"].nil?
mergeable = JSON.parse(res.body)["mergeable"]
rescue RetryError
sleep 1
retry unless (retries -= 1).zero?
raise "PR is unmergable."
end
mergeable || false
end
# True when an API token is present in the environment (ENV values are
# always strings, so key presence is equivalent to a non-nil lookup).
def can_auth?
  ENV.key?("GH_RELEASE_TOKEN")
end
# True when GH_OWNER/GH_REPO is reachable with the current token.
def repo_exists?
res = get("/repos/#{GH_OWNER}/#{GH_REPO}")
res.is_a? Net::HTTPSuccess
end
# True when the debuild tool is on PATH (needed to build the .deb).
def can_build_deb?
system("which debuild > /dev/null 2>&1")
end
# Build the release tarball via script/package-tarball; returns the
# combined stdout/stderr of the script.
#
# Bug fix: the previous code tested the backtick output (always a truthy
# String), so script failures went undetected. We now check the exit
# status via $CHILD_STATUS (English stdlib, required at the top of file).
def package_tarball
  out = `script/package-tarball 2>&1`
  raise "Failed to package tarball:\n\n#{out}" unless $CHILD_STATUS.success?
  out
end
# Build the Debian package via script/package-deb (tests skipped through
# DEB_BUILD_OPTIONS=nocheck); returns the combined stdout/stderr.
#
# Bug fix: the previous code tested the backtick output (always a truthy
# String), so script failures went undetected. We now check the exit
# status via $CHILD_STATUS (English stdlib, required at the top of file).
def package_deb
  out = `DEB_BUILD_OPTIONS=nocheck script/package-deb 2>&1`
  raise "Failed to package Debian package:\n\n#{out}" unless $CHILD_STATUS.success?
  out
end
# Upload the given files as release assets. The memoized @http client is
# reset before and after because the uploads host differs from the API
# host (see the memoization note on client).
def attach_assets_to_release(upload_url, release_id, files)
@http = nil
client(URI(upload_url.gsub(/{.*}/, "")).host)
begin
files.each do |file|
raw_file = File.open(file).read
res = post_file("/repos/#{GH_OWNER}/#{GH_REPO}/releases/#{release_id}/assets?name=#{File.basename(file)}", raw_file)
raise "Failed to attach #{file} to release (#{res.code})" unless res.is_a? Net::HTTPSuccess
end
rescue => e
# NOTE(review): this rescue only re-raises — it is a no-op; confirm
# whether cleanup was intended here.
raise e
end
@http = nil
end
# Return the local checkout to a clean state: back on the base branch,
# remote state refreshed, and the temporary release/packaging branches
# deleted locally and on origin. All failures are intentionally ignored.
def clean_up(version)
`git checkout --quiet #{GH_BASE_BRANCH}`
`git fetch --quiet origin --prune`
`git pull --quiet origin #{GH_BASE_BRANCH} --prune`
`git branch --quiet -D release-#{version} >/dev/null 2>&1`
`git push --quiet origin :release-#{version} >/dev/null 2>&1`
`git branch --quiet -D tmp-packaging >/dev/null 2>&1`
end
# A release may only be cut from "master" or a release branch named
# "x.y-main"; anything else is rejected.
def is_base_branch_valid?(branch)
  return true if branch == "master"
  branch.match?(/^\d+\.\d+-main$/)
end
# Derive the stable branch that mirrors the given base branch:
# "master" releases from plain "stable"; a release branch "x.y-main"
# releases from its matching "x.y-stable".
def get_stable_branch_name(branch)
  branch == "master" ? "stable" : branch.gsub(/-main$/, "-stable")
end
#### All the action starts ####
# Release pipeline: validate branch -> parse args -> bump version ->
# update changelog -> PR + merge -> tag -> draft release -> build &
# attach assets -> (optionally) publish -> fast-forward stable.
if $PROGRAM_NAME == __FILE__
begin
## validate base branch. this must either be "master" or a release branch which will match the pattern "x.y-main"
raise "The branch #{GH_BASE_BRANCH} is not valid for releasing backup-utils. branch name must be master or match x.y-main" if !is_base_branch_valid?(GH_BASE_BRANCH)
# NOTE(review): this reassigns the GH_STABLE_BRANCH constant (initialized
# to "" above), which emits an "already initialized constant" warning —
# confirm intent.
GH_STABLE_BRANCH = get_stable_branch_name(GH_BASE_BRANCH)
puts "base branch = " + GH_BASE_BRANCH
puts "stable branch = " + GH_STABLE_BRANCH
args = ARGV.dup
dry_run = false
# NOTE(review): this local is never read — bump_version checks the
# @skip_version_bump_check instance variable set below.
skip_version_bump_check = false
if args.include?("--dry-run")
dry_run = true
args.delete "--dry-run"
end
if args.include?("--no-warn")
@no_warn = true
args.delete "--no-warn"
end
if args.include?("--skip-version-bump-check")
@skip_version_bump_check = true
args.delete "--skip-version-bump-check"
end
raise "Usage: release [--dry-run] [--skip-version-bump-check] <version> [min_version]" if args.empty?
begin
version = Gem::Version.new(args[0])
min_version = args[1] ? args[1] : nil
rescue ArgumentError
raise "Error parsing version #{args[0]}"
end
# X.Y.0 releases introduce a new feature series and must declare the
# minimum supported GHES version.
raise "Minimum supported version is required for X.Y.0 releases\n\nUsage: release [--dry-run] <version> [min_version]" if /[0-9]\.[0-9]+\.0/ =~ version.to_s && min_version.nil?
raise "The repo #{GH_REPO} does not exist for #{GH_OWNER}" unless repo_exists?
raise "GH_AUTHOR environment variable is not set" if GH_AUTHOR.nil?
release_changes = []
release_changes = beautify_changes changelog if can_auth?
release_a = false
release_a = release_available? "v#{version}"
puts "Bumping version to #{version}..."
bump_version version, min_version
# Dry run: print what would happen, revert the version-bump edits, exit.
if dry_run
puts "Existing release?: #{release_a}"
puts "New version: #{version}"
puts "Min version: #{min_version}" unless min_version.nil?
puts "Owner: #{GH_OWNER}"
puts "Repo: #{GH_REPO}"
puts "Author: #{GH_AUTHOR}"
puts "Token: #{ENV["GH_RELEASE_TOKEN"] && "set" || "unset"}"
puts "Base branch: #{GH_BASE_BRANCH}"
puts "Changelog:"
if release_changes.empty?
puts " => No new bug fixes, enhancements or features."
else
release_changes.each { |c| puts " * #{c}" }
end
puts "Changes:"
puts `git diff --color`
`git checkout -- share/github-backup-utils/version`
`git checkout -- bin/ghe-host-check`
`git checkout -- test/testlib.sh`
exit
end
raise 'Unable to build Debian pkg: "debuild" not found.' unless can_build_deb?
raise "Release #{version} already exists." if release_a
`git fetch --quiet origin --prune`
branches = `git branch --all | grep release-#{version}$`
unless branches.empty?
out = "Release branch release-#{version} already exists. "
out += "Branches found:"
branches.each_line { |l| out += "\n* #{l.strip.chomp}" }
raise out
end
puts "Updating changelog..."
update_changelog release_changes, DEB_PKG_NAME, version
release_body = "Includes general improvements & bug fixes"
release_body += " and support for GitHub Enterprise Server v#{version}" unless min_version.nil?
release_changes.each do |c|
release_body += "\n* #{c}"
end
puts "Pushing release branch and creating release PR..."
push_release_branch version
res = create_release_pr(version, "#{release_body}\n\n/cc @github/backup-utils")
puts "Merging release PR..."
res = merge_pr res["number"], res["head"]["sha"], version
puts "Tagging and publishing release..."
tag "v#{version}", res["sha"]
puts "Creating release..."
release_title = "GitHub Enterprise Server Backup Utilities v#{version}"
res = create_release "v#{version}", GH_BASE_BRANCH, release_title, release_body, true
# Tidy up before building tarball and deb pkg
clean_up version
puts "Building release tarball..."
package_tarball
puts "Building Debian pkg..."
package_deb
puts "Attaching Debian pkg and tarball to release..."
base_dir = File.expand_path(File.join(File.dirname(__FILE__), ".."))
attach_assets_to_release res["upload_url"], res["id"], ["#{base_dir}/dist/#{DEB_PKG_NAME}-v#{version}.tar.gz"]
attach_assets_to_release res["upload_url"], res["id"], ["#{base_dir}/dist/#{DEB_PKG_NAME}_#{version}_all.deb"]
if PUBLISH
puts "Publishing release..."
publish_release res["id"]
end
puts "Cleaning up..."
clean_up version
puts "Updating #{GH_STABLE_BRANCH} branch..."
update_stable_branch
if !PUBLISH
puts 'Release left in a "Draft" state. Go to the https://github.com/github/backup-utils/releases and publish when ready.'
end
puts "Released!"
rescue RuntimeError => e
$stderr.puts "Error: #{e}"
exit 1
end
end

Просмотреть файл

@ -1,28 +0,0 @@
#!/usr/bin/env bash
# Usage: script/test
# Run every test suite (test/test-*.sh) in order. Under GitHub Actions a
# markdown table of per-suite results is appended to the step summary.
set -e

ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
TMPDIR="$ROOTDIR/test/tmp"

# Remove possible remnants of previous test runs.
# Bug fix: the glob must stay outside the quotes. The previous
# rm -rf "${TMPDIR:?}/*" looked for a literal file named "*" and removed
# nothing. ${TMPDIR:?} still aborts if TMPDIR is unset or empty, so this
# can never expand to "rm -rf /*".
rm -rf "${TMPDIR:?}"/*

# Append the collected per-suite results table to the GitHub Actions step
# summary; no-op outside Actions (GITHUB_STEP_SUMMARY unset).
print_test_results() {
  if [ -n "$GITHUB_STEP_SUMMARY" ]; then
    echo -e "### Test results\n" >> "$GITHUB_STEP_SUMMARY"
    echo "| Test suite | Result | Successful | Failed | Skipped | Duration |" >> "$GITHUB_STEP_SUMMARY"
    echo "|---|---|--:|--:|--:|--:|" >> "$GITHUB_STEP_SUMMARY"
    sort -V "$TMPDIR/results" >> "$GITHUB_STEP_SUMMARY"
  fi
}

# Enable verbose logging of ssh commands
export GHE_VERBOSE_SSH=true

# Run each suite in its own bash process; emit the summary even when a
# suite fails so CI always shows partial results.
if ! find test -name "test-*.sh" -print0 | sort -z | xargs -0 -n 1 /bin/bash; then
  print_test_results
  exit 1
fi
print_test_results

Просмотреть файл

@ -1,63 +0,0 @@
#!/usr/bin/env bash
# bm.sh: benchmarking Bash code blocks
#
# Example:
# bm_start "wget request"
# wget --quiet https://www.google.com
# bm_end "wget request"
#
# Sample output:
# $ bash test.sh
# wget request took 2s
# Convert a free-form benchmark description into a shell-safe variable
# name by deleting every non-alphanumeric character and prefixing "__bm".
# Bug fix: the previous tr set '[[:alnum:]]' also kept literal '[' and ']'
# (the outer brackets are not part of the class syntax in tr), which would
# produce an invalid variable name for the eval in bm_start.
bm_desc_to_varname(){
  echo "__bm$(echo "$@" | tr -cd '[:alnum:]')"
}
# Record the start time (epoch seconds) for the given description in a
# per-description variable, and make sure the benchmark log exists.
bm_start()
{
eval "$(bm_desc_to_varname "$@")_start=$(date +%s)"
if [ -n "$GHE_DEBUG" ]; then
echo "Debug: $1 (bm_start)"
fi
bm_init > /dev/null
}
# Resolve (and memoize in BM_FILE_PATH) the benchmark log location under
# the restore snapshot path if set, otherwise under the backup snapshot
# dir. Prints the path on stdout.
bm_init() {
if [ -n "$BM_FILE_PATH" ]; then
echo $BM_FILE_PATH
return
fi
local logfile="benchmark.${BM_TIMESTAMP:-$(date +"%Y%m%dT%H%M%S")}.log"
if [ -n "$GHE_RESTORE_SNAPSHOT_PATH" ]; then
export BM_FILE_PATH=$GHE_RESTORE_SNAPSHOT_PATH/benchmarks/$logfile
else
export BM_FILE_PATH=$GHE_SNAPSHOT_DIR/benchmarks/$logfile
fi
mkdir -p "$(dirname $BM_FILE_PATH)"
echo $BM_FILE_PATH
}
# Log the elapsed time since the matching bm_start call to the benchmark
# file, the verbose log (fd 3, opened by ghe-backup-config), and the
# progress tracker. Exits if bm_start was never called.
bm_end() {
if [ -z "$BM_FILE_PATH" ]; then
echo "Call bm_start first" >&2
exit 1
fi
local tend tstart total
tend=$(date +%s)
tstart=$(eval "echo \$$(bm_desc_to_varname "$@")_start")
total=$(($tend - $tstart))
echo "$1 took ${total}s" >> $BM_FILE_PATH
# also log timing information in the verbose log
echo "$1 took ${total}s" 1>&3
if [ -n "$GHE_DEBUG" ]; then
echo "Debug: $1 took ${total}s (bm_end)"
fi
# track progress
progress "$1 took ${total}s"
}

Просмотреть файл

@ -1,51 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-actions
#/ Take an online, incremental snapshot of all Actions data (excluding
#/ what is stored in MSSQL)
#/
#/ Note: This command typically isn't called directly. It's invoked by
#/ ghe-backup.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
bm_start "$(basename $0)"
# Set up remote host and root backup snapshot directory based on config
port=$(ssh_port_part "$GHE_HOSTNAME")
host=$(ssh_host_part "$GHE_HOSTNAME")
backup_dir="$GHE_SNAPSHOT_DIR/actions"
# Verify rsync is available.
if ! rsync --version 1>/dev/null 2>&1; then
log_error "Error: rsync not found." 1>&2
exit 1
fi
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$host"
# Make sure root backup dir exists if this is the first run
mkdir -p "$backup_dir"
# If we have a previous increment and it is not empty, avoid transferring existing files via rsync's
# --link-dest support. This also decreases physical space usage considerably.
if [ -d "$GHE_DATA_DIR/current/actions" ] && [ "$(ls -A $GHE_DATA_DIR/current/actions)" ]; then
link_dest="--link-dest=$GHE_DATA_DIR/current/actions"
fi
# Transfer all Actions data from the user data directory using rsync.
# Verbose rsync output goes to fd 3 (opened by ghe-backup-config).
ghe_verbose "* Transferring Actions files from $host ..."
log_rsync "BEGIN: actions rsync" 1>&3
# $link_dest is intentionally unquoted: it is either empty (expands to
# nothing) or a single --link-dest=... option.
ghe-rsync -av \
-e "ghe-ssh -p $port" \
--rsync-path='sudo -u actions rsync' \
--exclude "mutexes" --exclude "dumps" --exclude "tmp" \
$link_dest \
"$host:$GHE_REMOTE_DATA_USER_DIR/actions/" \
"$GHE_SNAPSHOT_DIR/actions" 1>&3
log_rsync "END: actions rsync" 1>&3
bm_end "$(basename $0)"

Просмотреть файл

@ -1,708 +0,0 @@
#!/usr/bin/env bash
# Usage: . ghe-backup-config
# GitHub Enterprise backup shell configuration.
#
# This file is sourced by the various utilities under bin and share/github-backup-utils to
# load in backup configuration and ensure things are configured properly.
#
# All commands in share/github-backup-utils/ should start with the following:
#
# . $( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config
#
# And all commands in bin/ should start with the following:
#
# . $( dirname "${BASH_SOURCE[0]}" )/../share/github-backup-utils/ghe-backup-config
#
set +o posix
# Terminal colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Assume this script lives in share/github-backup-utils/ when setting the root
GHE_BACKUP_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# Get the version from the version file.
BACKUP_UTILS_VERSION="$(cat "$GHE_BACKUP_ROOT/share/github-backup-utils/version")"
# If a version check was requested, show the current version and exit
if [ -n "$GHE_SHOW_VERSION" ]; then
echo "GitHub backup-utils v$BACKUP_UTILS_VERSION"
exit 0
fi
# Check for "--help|-h" in args or GHE_SHOW_HELP=true and show usage
# shellcheck disable=SC2120 # Our arguments are optional and not meant to be the owning script's
print_usage() {
grep '^#/' <"$0" | cut -c 4-
exit "${1:-1}"
}
if [ -n "$GHE_SHOW_HELP" ]; then
print_usage
else
for a in "$@"; do
if [ "$a" = "--help" ] || [ "$a" = "-h" ]; then
print_usage
fi
done
fi
# Save off GHE_HOSTNAME from the environment since we want it to override the
# backup.config value when set.
GHE_HOSTNAME_PRESERVE="$GHE_HOSTNAME"
# Source in the backup config file from the copy specified in the environment
# first and then fall back to the backup-utils root, home directory and system.
config_found=false
for f in "$GHE_BACKUP_CONFIG" "$GHE_BACKUP_ROOT/backup.config" \
"$HOME/.github-backup-utils/backup.config" "/etc/github-backup-utils/backup.config"; do
if [ -f "$f" ]; then
GHE_BACKUP_CONFIG="$f"
# shellcheck disable=SC1090 # This is a user-supplied value that can't be predicted
. "$GHE_BACKUP_CONFIG"
config_found=true
break
fi
done
GHE_RESTORE_IN_PROGRESS=$(readlink -fm "${GHE_DATA_DIR}/in-progress-restore")
GHE_BACKUP_IN_PROGRESS=$(readlink -fm "${GHE_DATA_DIR}/in-progress-backup")
export GHE_RESTORE_IN_PROGRESS
export GHE_BACKUP_IN_PROGRESS
# Logging display and formatting functions
log_level() {
local level=$1
shift
local message=$*
local display=""
local timestamp
timestamp=$(date -u "+%FT%TZ")
if [ "$TERM" = "dumb" ] || [[ "$OUTPUT_COLOR" != "yes" ]]; then
if [ "$level" = "info" ]; then
display="INFO"
elif [ "$level" = "warn" ]; then
display="WARN"
elif [ "$level" = "error" ]; then
display="ERROR"
elif [ "$level" = "verbose" ]; then
display="INFO"
elif [ "$level" = "rsync" ]; then
display="RSYNC"
elif [ "$level" = "ssh" ]; then
display="SSH"
else
display="-"
fi
else
if [ "$level" = "info" ]; then
display="${GREEN}INFO${NC}"
elif [ "$level" = "warn" ]; then
display="${YELLOW}WARN${NC}"
elif [ "$level" = "error" ]; then
display="${RED}ERROR${NC}"
elif [ "$level" = "verbose" ]; then
display="${GREEN}INFO${NC}"
elif [ "$level" = "rsync" ]; then
display="${GREEN}RSYNC${NC}"
elif [ "$level" = "ssh" ]; then
display="${GREEN}SSH${NC}"
else
display="-"
fi
fi
echo -e "$timestamp $display $message"
}
log_info(){
log_level "info" "$1"
}
log_warn(){
log_level "warn" "$1"
}
log_error(){
log_level "error" "$1"
}
log_verbose(){
log_level "verbose" "$1"
}
log_rsync(){
log_level "rsync" "$1"
}
log_ssh(){
log_level "ssh" "$1"
}
# Add the bin and share/github-backup-utils dirs to PATH
PATH="$GHE_BACKUP_ROOT/bin:$GHE_BACKUP_ROOT/share/github-backup-utils:$PATH"
# shellcheck source=share/github-backup-utils/bm.sh
. "$GHE_BACKUP_ROOT/share/github-backup-utils/bm.sh"
# shellcheck source=share/github-backup-utils/ghe-incremental-backup-restore
. "$GHE_BACKUP_ROOT/share/github-backup-utils/ghe-incremental-backup-restore"
# shellcheck source=share/github-backup-utils/track-progress
. "$GHE_BACKUP_ROOT/share/github-backup-utils/track-progress"
ghe_restore_check() {
if [ -h "$GHE_RESTORE_IN_PROGRESS" ]; then
echo " Error: detected a restore already in progress from a previous version of ghe-restore." 1>&2
echo " If there is no restore in progress anymore, please remove" 1>&2
echo " the $GHE_RESTORE_IN_PROGRESS file and try again." 1>&2
exit 1
fi
if [ -f "$GHE_RESTORE_IN_PROGRESS" ]; then
progress=$(cat "$GHE_RESTORE_IN_PROGRESS")
pid=$(echo "$progress" | cut -d ' ' -f 2)
echo " Error: A restore of $GHE_HOSTNAME may still be running on PID $pid." 1>&2
echo " If PID $pid is not a process related to the restore utilities, please remove" 1>&2
echo " the $GHE_RESTORE_IN_PROGRESS file and try again." 1>&2
exit 1
fi
}
ghe_backup_check() {
if [ -h "$GHE_BACKUP_IN_PROGRESS" ]; then
echo " Error: detected a backup already in progress from a previous version of ghe-backup." 1>&2
echo " If there is no backup in progress anymore, please remove" 1>&2
echo " the $GHE_DATA_DIR/$GHE_BACKUP_IN_PROGRESS file and try again." 1>&2
exit 1
fi
if [ -f "$GHE_BACKUP_IN_PROGRESS" ]; then
progress=$(cat "$GHE_BACKUP_IN_PROGRESS")
pid=$(echo "$progress" | cut -d ' ' -f 2)
echo " Error: A backup of $GHE_HOSTNAME may still be running on PID $pid." 1>&2
echo " If PID $pid is not a process related to the backup utilities, please remove" 1>&2
echo " the $GHE_BACKUP_IN_PROGRESS file and try again." 1>&2
exit 1
fi
}
ghe_restore_finished() {
if [ -f "$GHE_RESTORE_IN_PROGRESS" ]; then
rm -f "$GHE_RESTORE_IN_PROGRESS"
fi
}
ghe_backup_finished() {
if [ -f "$GHE_BACKUP_IN_PROGRESS" ]; then
rm -f "$GHE_BACKUP_IN_PROGRESS"
fi
}
ghe_parallel_check() {
GHE_PARALLEL_COMMAND_OPTIONS=()
GHE_PARALLEL_RSYNC_COMMAND_OPTIONS=()
if [ "$GHE_PARALLEL_ENABLED" != "yes" ]; then
return 0
fi
# Some machines may have both moreutils parallel and GNU parallel installed.
# Check some variants to find it
GHE_PARALLEL_COMMAND="parallel"
local x
for x in \
/usr/bin/parallel-moreutils \
/usr/bin/parallel.moreutils \
/usr/bin/parallel_moreutils \
/usr/bin/moreutils-parallel \
/usr/bin/moreutils.parallel \
/usr/bin/moreutils_parallel \
; do
if [ -x "${x}" ]; then
GHE_PARALLEL_COMMAND="${x}"
break
fi
done
# Check that the GHE_PARALLEL_COMMAND is pointing to moreutils parallel
if ! "$GHE_PARALLEL_COMMAND" -h | grep -q "parallel \[OPTIONS\] command -- arguments"; then
echo "Error: moreutils not found. Please install https://joeyh.name/code/moreutils" 1>&2
exit 1
fi
if [ -n "$GHE_PARALLEL_MAX_JOBS" ]; then
GHE_PARALLEL_COMMAND_OPTIONS+=(-j "$GHE_PARALLEL_MAX_JOBS")
# Default to the number of max rsync jobs to the same as GHE_PARALLEL_MAX_JOBS, if not set.
# This is only applicable to ghe-restore-repositories currently.
: "${GHE_PARALLEL_RSYNC_MAX_JOBS:="$GHE_PARALLEL_MAX_JOBS"}"
fi
if [ -n "$GHE_PARALLEL_RSYNC_MAX_JOBS" ]; then
GHE_PARALLEL_RSYNC_COMMAND_OPTIONS+=(-j "$GHE_PARALLEL_RSYNC_MAX_JOBS")
fi
if [ -n "$GHE_PARALLEL_MAX_LOAD" ]; then
GHE_PARALLEL_COMMAND_OPTIONS+=(-l "$GHE_PARALLEL_MAX_LOAD")
GHE_PARALLEL_RSYNC_COMMAND_OPTIONS+=(-l "$GHE_PARALLEL_MAX_LOAD")
fi
}
# Check that the config file exists before we source it in.
if ! $config_found; then
echo "Error: No backup configuration file found. Tried:" 1>&2
[ -n "$GHE_BACKUP_CONFIG" ] && echo " - $GHE_BACKUP_CONFIG" 1>&2
echo " - $GHE_BACKUP_ROOT/backup.config" 1>&2
echo " - $HOME/.github-backup-utils/backup.config" 1>&2
echo " - /etc/github-backup-utils/backup.config" 1>&2
exit 2
fi
# If verbose logging is enabled, redirect fd 3 to stdout or the specified log file;
# otherwise, redirect it to /dev/null. Write verbose output to fd 3.
if [ -n "$GHE_VERBOSE" ]; then
if [ -n "$GHE_VERBOSE_LOG" ]; then
if [ "$GHE_PARALLEL_ENABLED" != "yes" ]; then
exec 3>>"$GHE_VERBOSE_LOG"
else
calling_script_name="$(caller | sed 's:.*/::')"
if [ "$TERM" = "dumb" ] || [[ "$OUTPUT_COLOR" != "yes" ]]; then
exec 3>>"$GHE_VERBOSE_LOG"
log_info "$calling_script_name $*" 1>&3
else
# colorize the input if supported.
display_caller="${BLUE}$calling_script_name${NC}"
exec 3>>"$GHE_VERBOSE_LOG"
log_info "$display_caller $*" 1>&3
fi
fi
else
exec 3>&1
fi
else
exec 3>/dev/null
fi
# Restore saved off hostname.
# NOTE(review): GHE_HOSTNAME_PRESERVE is saved earlier in this file (not
# visible in this chunk).
[ -n "$GHE_HOSTNAME_PRESERVE" ] && GHE_HOSTNAME="$GHE_HOSTNAME_PRESERVE"
# Check that the GHE hostname is set.
if [ -z "$GHE_HOSTNAME" ]; then
echo "Error: GHE_HOSTNAME not set in config file." 1>&2
exit 2
fi
# Check that the GHE data directory is set.
if [ -z "$GHE_DATA_DIR" ]; then
echo "Error: GHE_DATA_DIR not set in config file." 1>&2
exit 2
fi
# Convert the data directory path to an absolute path, basing any relative
# paths on the backup-utils root, and use readlink to canonicalize the path.
# (readlink -m canonicalizes even when path components don't exist yet.)
if [ "${GHE_DATA_DIR:0:1}" != "/" ]; then
GHE_DATA_DIR="$(cd "$GHE_BACKUP_ROOT" && readlink -m "$GHE_DATA_DIR")"
fi
export GHE_DATA_DIR
# Assign the Release File path if it hasn't been provided (eg: by test suite)
: "${GHE_RELEASE_FILE:="/etc/github/enterprise-release"}"
# Check that utils are not being run directly on GHE appliance.
# (The release file only exists on a GitHub Enterprise Server host.)
if [ -f "$GHE_RELEASE_FILE" ]; then
echo "Error: Backup Utils cannot be run on the GitHub Enterprise host." 1>&2
echo " The backup utilities should be run on a host dedicated to" 1>&2
echo " long-term permanent storage and must have network connectivity" 1>&2
echo " with the GitHub Enterprise appliance." 1>&2
exit 1
fi
GHE_CREATE_DATA_DIR=${GHE_CREATE_DATA_DIR:-yes}
# Check that the data directory is set and create it if it doesn't exist.
# (Progress/log messages go to fd 3, wired up earlier in this file.)
if [ ! -d "$GHE_DATA_DIR" ] && [ "$GHE_CREATE_DATA_DIR" = "yes" ]; then
echo "Creating the backup data directory ..." 1>&3
mkdir -p "$GHE_DATA_DIR"
fi
if [ ! -d "$GHE_DATA_DIR" ]; then
echo "Error: GHE_DATA_DIR $GHE_DATA_DIR does not exist." >&2
exit 8
fi
# Set some defaults if needed.
: "${GHE_NUM_SNAPSHOTS:=10}"
# Generate a backup timestamp if one has not already been generated.
# We export the variable so the process group shares the same value.
# Format: yyyymmddTHHMMSS (lexically sortable).
: "${GHE_SNAPSHOT_TIMESTAMP:=$(date +"%Y%m%dT%H%M%S")}"
export GHE_SNAPSHOT_TIMESTAMP
# Set the current snapshot directory to <data-dir>/<timestamp>. This is where
# all backups should be written for the current invocation.
GHE_SNAPSHOT_DIR="$GHE_DATA_DIR"/"$GHE_SNAPSHOT_TIMESTAMP"
export GHE_SNAPSHOT_DIR
# The root filesystem location. This must be used so that tests can override
# the root as a local directory location.
: "${GHE_REMOTE_ROOT_DIR:=""}"
# The root location of persistent data and applications on the remote side. This
# is always "/data" for GitHub instances. Use of this variable allows
# the location to be overridden in tests.
: "${GHE_REMOTE_DATA_DIR:="/data"}"
# The root location of user data stores such as git repositories, pages sites,
# elasticsearch indices, etc. This is "/data" under 1.x filesystem layouts and
# "/data/user" under the 2.x filesystem layout. The location is adjusted
# dynamically in ghe_remote_version_config() immediately after obtaining the
# remote version. Utilities that transfer data in and out of the appliance
# should use this variable to ensure proper behavior under different versions.
: "${GHE_REMOTE_DATA_USER_DIR:="$GHE_REMOTE_DATA_DIR"}"
# The location of the license file on the remote side. This is always
# "/data/enterprise/enterprise.ghl" for GitHub instances. Use of this variable
# allows the location to be overridden in tests.
: "${GHE_REMOTE_LICENSE_FILE:="$GHE_REMOTE_DATA_DIR/enterprise/enterprise.ghl"}"
# The number of seconds to wait for in progress git-gc processes to complete
# before starting the sync of git data. See share/github-backup-utils/ghe-backup-repositories-rsync
# for more information. Default: 10 minutes.
: "${GHE_GIT_COOLDOWN_PERIOD:=600}"
# Set "true" to get verbose logging of all ssh commands on stderr
: "${GHE_VERBOSE_SSH:=false}"
# The location of the cluster configuration file on the remote side.
# This is always "/data/user/common/cluster.conf" for GitHub Cluster instances.
# Use of this variable allows the location to be overridden in tests.
: "${GHE_REMOTE_CLUSTER_CONF_FILE:="$GHE_REMOTE_DATA_DIR/user/common/cluster.conf"}"
# The location of the file used to disable GC operations on the remote side.
: "${SYNC_IN_PROGRESS_FILE:="$GHE_REMOTE_DATA_USER_DIR/repositories/.sync_in_progress"}"
# Base path for temporary directories and files.
: "${TMPDIR:="/tmp"}"
# Backup cadence for MS SQL. Determines the kind of backup taken, either full, differential,
# or transaction log, based on when the last backup of that kind was taken. This defaults to
# taking a full backup once a week, a differential backup once a day, and transaction logs every
# 15 minutes. Format: "<full>,<diff>,<translog>" in minutes.
: "${GHE_MSSQL_BACKUP_CADENCE:=10080,1440,15}"
###############################################################################
### Dynamic remote version config
# Adjusts remote paths based on the version of the remote appliance. This is
# called immediately after the remote version is obtained by
# ghe_remote_version_required(). Child processes inherit the values set here.
ghe_remote_version_config() {
  # Under the modern (2.x+) filesystem layout, user data lives under
  # $GHE_REMOTE_DATA_DIR/user. Export the adjusted paths so every child
  # process inherits them.
  GHE_REMOTE_DATA_USER_DIR="$GHE_REMOTE_DATA_DIR/user"
  export GHE_REMOTE_DATA_DIR GHE_REMOTE_DATA_USER_DIR GHE_REMOTE_LICENSE_FILE
}
###############################################################################
### Utility functions
# Run ghe-host-check and establish the version of the remote GitHub instance in
# the exported GHE_REMOTE_VERSION variable. If the remote version has already
# been established then don't perform the host check again. Utilities in share/github-backup-utils
# that need the remote version should use this function instead of calling
# ghe-host-check directly to reduce ssh roundtrips. The top-level ghe-backup and
# ghe-restore commands establish the version for all subcommands.
# shellcheck disable=SC2120 # Our arguments are optional and not meant to be the owning script's
ghe_remote_version_required() {
# Only probe the appliance once; subsequent calls are no-ops.
if [ -z "$GHE_REMOTE_VERSION" ]; then
_out=$(ghe-host-check "$@")
# Echo the full ghe-host-check output for the operator.
echo "$_out"
# The last output line is parsed below; judging from the parameter
# expansions it looks like: "Connect <host>:<port> OK (v<version>)".
_out_hostname=$(echo "$_out" | tail -n 1)
# override hostname w/ ghe-host-check output because the port could have
# been autodetected to 122.
GHE_HOSTNAME="${_out_hostname/Connect /}"
GHE_HOSTNAME="${GHE_HOSTNAME/ OK*/}"
export GHE_HOSTNAME
# Extract the text between the parentheses as the remote version.
GHE_REMOTE_VERSION="${_out_hostname#*\(}"
GHE_REMOTE_VERSION="${GHE_REMOTE_VERSION%%\)*}"
export GHE_REMOTE_VERSION
# Populate GHE_VERSION_* and adjust remote paths for this version.
ghe_parse_remote_version "$GHE_REMOTE_VERSION"
ghe_remote_version_config "$GHE_REMOTE_VERSION"
fi
# Always succeed so callers under `set -e` aren't aborted by the guard above.
true
}
# Parse a version string into major, minor and patch parts and echo.
ghe_parse_version() {
  # Parse "vMAJOR.MINOR.PATCH[suffix]" (leading "v" optional) and echo
  # "MAJOR MINOR PATCH" as three numeric, space-separated fields.
  local version_major version_minor version_patch
  IFS=. read -r version_major version_minor version_patch _ <<<"${1#v}"
  # Strip any trailing pre-release suffix from the patch part ("3pre" -> "3").
  version_patch=${version_patch%%[a-zA-Z]*}
  # Default missing parts to 0 so two-part versions such as "3.11" still
  # produce three numeric fields for downstream numeric comparisons
  # (previously the patch field came out empty).
  echo "${version_major:-0} ${version_minor:-0} ${version_patch:-0}"
}
# Parse major, minor, and patch parts of the remote appliance version and store
# in GHE_VERSION_MAJOR, GHE_VERSION_MINOR, and GHE_VERSION_PATCH variables. All
# parts are numeric. This is called automatically from
# ghe_remote_version_required so shouldn't be used directly.
#
# Scripts use these variables to alter behavior based on what's supported on the
# appliance version.
ghe_parse_remote_version() {
  # Split the "MAJOR MINOR PATCH" output of ghe_parse_version into the three
  # exported GHE_VERSION_* variables used throughout backup-utils.
  # shellcheck disable=SC2046 # Word splitting is required to populate the variables
  read -r GHE_VERSION_MAJOR GHE_VERSION_MINOR GHE_VERSION_PATCH <<< "$(ghe_parse_version "$1")"
  export GHE_VERSION_MAJOR GHE_VERSION_MINOR GHE_VERSION_PATCH
}
# In 3.11 we started to install 2 different version parallel(s)
# moreutils parallel and GNU parallel. When gnu parallel is installed,
# it renames moreutils parallel to parallel.moreutils
# set $PARALLEL_CMD envvar to be used in place of parallel commands
ghe_remote_parallel() {
  # Pick the correct `parallel` binary name on the appliance and export it
  # as PARALLEL_CMD. From GHES 3.11 onwards GNU parallel is installed next
  # to moreutils parallel, and the moreutils binary is renamed to
  # "parallel.moreutils". Requires ghe_remote_version_required to have run.
  if [ -z "$GHE_REMOTE_VERSION" ]; then
    echo "Error: ghe_remote_version_required needs to be invoked before ghe_remote_parallel" 1>&2
    exit 1
  fi
  # Compare the full (major, minor) pair: checking only the minor version
  # would wrongly treat e.g. 4.0 as older than 3.11.
  if [ "$GHE_VERSION_MAJOR" -lt 3 ] || { [ "$GHE_VERSION_MAJOR" -eq 3 ] && [ "$GHE_VERSION_MINOR" -lt 11 ]; }; then
    PARALLEL_CMD="parallel"
  else
    PARALLEL_CMD="parallel.moreutils"
  fi
  export PARALLEL_CMD
}
# Parses the <host> part out of a "<host>:<port>" or just "<host>" string.
# This is used primarily to break hostspecs with non-standard ports down for
# rsync commands.
ssh_host_part() {
  # Print the <host> portion of a "<host>[:<port>]" hostspec.
  case "$1" in
    *:*) echo "${1%:*}" ;;
    *)   echo "$1" ;;
  esac
}
# Parses the <port> part out of a "<host>:<port>" or just "<host>" string.
# This is used primarily to break hostspecs with non-standard ports down for
# rsync commands.
ssh_port_part() {
  # Validate the <port> portion of a "<host>[:<port>]" hostspec: only the
  # GHES administrative SSH port 122 is accepted, and 122 is always printed.
  local explicit_port="${1##*:}"
  if [ "$explicit_port" != "$1" ] && [ "$explicit_port" -ne "122" ]; then
    echo "Error: SSH port has to be 122 connecting to GitHub Enterprise Server, current value is $explicit_port for $1." 1>&2
    exit 1
  fi
  echo 122
}
# Usage: ghe_remote_logger <message>...
# Log a message to /var/log/syslog on the remote instance.
# Note: Use sparingly. Remote logging requires an ssh connection per invocation.
ghe_remote_logger() {
# Pipe the message to logger(1) on the appliance; `|| true` keeps a failed
# ssh connection from aborting the caller.
echo "$@" |
ghe-ssh "$GHE_HOSTNAME" -- logger -t backup-utils || true
}
# Usage: ghe_verbose <message>
# Log if verbose mode is enabled (GHE_VERBOSE or `-v`).
ghe_verbose() {
# fd 3 is opened during config load (verbose log file, stdout, or /dev/null
# depending on options), so this is a no-op sink when verbosity is off.
if [ -n "$GHE_VERBOSE" ]; then
log_verbose "$@" 1>&3
fi
}
# Usage: ghe_debug <message> OR echo <message> | ghe_debug
# Log if debug mode is enabled (GHE_DEBUG).
ghe_debug() {
  # Emit debug output on fd 3, either from arguments or from piped stdin.
  # No-op unless debug mode (GHE_DEBUG) is enabled.
  if [ -z "$GHE_DEBUG" ]; then
    return
  fi
  if [ $# -ne 0 ]; then
    # Direct form: ghe_debug "message"
    echo -e "Debug: $*" 1>&3
  elif [ -p /dev/stdin ]; then
    # Pipe form: prefix every incoming line with "Debug: ".
    echo -e "\n" 1>&3
    while read -r line; do
      echo -e "Debug: $line" 1>&3
    done </dev/stdin
  fi
}
version() {
  # Convert a dotted version like "v2.19.3" into a zero-padded, numerically
  # comparable integer string ("2019003000"). Missing parts pad to 000.
  local ver="${*#v}"
  # Discard stderr and always return true as trailing alpha (eg. "v1.2.3pre") will upset printf
  # shellcheck disable=SC2183,SC2086 # We want to split (SC2086) and expect 4 (fuzzy) params (SC2183)
  set -- ${ver//./ }
  printf "%d%03d%03d%03d\n" "$@" 2>/dev/null || true
}
# The list of gists returned by the source changed in 2.16.23, 2.17.14,
# 2.18.8, and 2.19.3. We need to account for this difference here.
# In older versions, all paths need to be truncated with `dirname`.
# In newer versions, gist paths are unmodified, and only other repo types
# are truncated with `dirname`.
fix_paths_for_ghe_version() {
# On the listed versions and newer, gist paths come through unmodified, so
# install a sed branch command that skips (b) lines containing "gist".
if [[ "$GHE_REMOTE_VERSION" =~ 2.16. && "$(version "$GHE_REMOTE_VERSION")" -ge "$(version 2.16.23)" ]] ||
[[ "$GHE_REMOTE_VERSION" =~ 2.17. && "$(version "$GHE_REMOTE_VERSION")" -ge "$(version 2.17.14)" ]] ||
[[ "$GHE_REMOTE_VERSION" =~ 2.18. && "$(version "$GHE_REMOTE_VERSION")" -ge "$(version 2.18.8)" ]] ||
[[ "$(version "$GHE_REMOTE_VERSION")" -ge "$(version 2.19.3)" ]]; then
GIST_FILTER=(-e "/gist/b")
else
# Older releases: every path (gists included) gets the dirname treatment.
unset GIST_FILTER
fi
# This sed expression is equivalent to running `dirname` on each line,
# but without all the fork+exec overhead of calling `dirname` that many
# times:
# 1. strip off trailing slashes
# 2. if the result has no slashes in it, the dirname is "."
# 3. truncate from the final slash (if any) to the end
# If the GIST_FILTER was set above (because we're on a modern version of
# GHES), then don't modify lines with "gist" in them.
# ("${GIST_FILTER[@]}" expands to nothing when the array is unset.)
sed "${GIST_FILTER[@]}" -e 's/\/$//; s/^[^\/]*$/./; s/\/[^\/]*$//'
}
is_binary_backup_feature_on() {
# True when the appliance has the binary MySQL backup setting enabled.
ghe-ssh "$GHE_HOSTNAME" ghe-config --true "mysql.backup.binary"
}
# Check if the backup is binary by looking up the sentinel file
is_binary_backup() {
  # A snapshot taken with the binary MySQL strategy is marked by a sentinel
  # file in the snapshot directory ($1).
  [ -f "$1/mysql-binary-backup-sentinel" ]
}
# Check if a given service is managed externally on the appliance or in a snapshot.
# Usage: is_service_external $service [$config_file]
# Pass in the config file to check if the service is managed externally in the snapshot.
is_service_external(){
  # True when service $1 is managed externally -- either on the live
  # appliance (no second argument) or according to a snapshot's settings
  # file passed as $2. Only "mysql" is currently supported.
  service=$1
  config_file=$2
  case $service in
  "mysql")
    if [ -n "$config_file" ]; then
      # Snapshot: read the saved git-config-formatted settings file.
      [ "$(GIT_CONFIG="$config_file" git config mysql.external.enabled)" == "true" ]
    else
      # Live appliance: query ghe-config over SSH.
      ghe-ssh "$GHE_HOSTNAME" -- ghe-config --true "mysql.external.enabled"
    fi
    ;;
  *)
    return 1
    ;;
  esac
}
is_instance_configured(){
# True when the appliance has completed its initial configuration run
# (marker file exists under the remote root).
ghe-ssh "$GHE_HOSTNAME" -- "[ -f '$GHE_REMOTE_ROOT_DIR/etc/github/configured' ]"
}
# Helper method that returns true if:
# - the target appliance uses the internal MySQL database (aka NOT BYODB), and
# - the snapshot being restored is from an appliance using an external MySQL database (BYODB)
external_database_snapshot_to_internal_database(){
! is_external_database_target && is_external_database_snapshot
}
# Helper method that returns true if:
# - the target appliance uses an external MySQL database (BYODB), and
# - the snapshot being restored is from an appliance using an internal MySQL database (aka NOT BYODB)
internal_database_snapshot_to_external_database(){
is_external_database_target && ! is_external_database_snapshot
}
is_external_database_target_or_snapshot(){
# If restoring settings, only check if the snapshot being restored was from an appliance with external DB configured.
# NOTE: $RESTORE_SETTINGS is executed as a command here (expected to expand
# to true/false), so it must stay unquoted and outside [ ].
if $RESTORE_SETTINGS; then
is_external_database_snapshot
else
# Check if restoring a snapshot with an external database configured, or restoring
# to an appliance with an external database configured.
is_external_database_snapshot || is_external_database_target
fi
}
is_external_database_target(){
# True when the live target appliance uses an external (BYODB) MySQL.
is_service_external "mysql"
}
is_external_database_snapshot(){
# True when the snapshot being restored was taken from a BYODB appliance,
# judged from the snapshot's saved settings file.
is_service_external "mysql" "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/settings.json"
}
# This file exists if this is a backup for an external database AND the backup was
# taken via our logical backup strategy.
is_default_external_database_snapshot(){
is_external_database_snapshot && test -f "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/logical-external-database-backup-sentinel"
}
prompt_for_confirmation(){
  # Print $1, then block until the operator types "yes" (also "Yes"/"YES").
  # An empty line re-prompts; any other answer aborts with exit status 1.
  echo "$1"
  printf "Type 'yes' to continue: "
  while read -r reply; do
    if [ "$reply" = "yes" ] || [ "$reply" = "Yes" ] || [ "$reply" = "YES" ]; then
      break
    elif [ -z "$reply" ]; then
      printf "Type 'yes' to continue: "
    else
      echo "Restore aborted." 1>&2
      exit 1
    fi
  done
  echo
}
# Initialize progress tracking by clearing out the temp files used to track it.
init-progress() {
  local progress_dir=/tmp/backup-utils-progress
  # Wipe any state left over from a previous run.
  if [ -e "$progress_dir" ]; then
    rm -rf "$progress_dir"/*
  fi
  # shellcheck disable=SC2174 # We are fine with -m only affecting the deepest directory
  mkdir -m 777 -p "$progress_dir"
  touch "$progress_dir"/{total,type,progress,info}
}
#increase total count of progress
increment-progress-total-count() {
  # Add $1 to the running total and persist it for the progress reporter.
  # Use $(( )) assignment rather than `(( PROGRESS_TOTAL += $1 ))`: the
  # (( )) form exits non-zero when the arithmetic result is 0, which
  # aborts callers running under `set -e`.
  PROGRESS_TOTAL=$((PROGRESS_TOTAL + $1))
  echo "$PROGRESS_TOTAL" > /tmp/backup-utils-progress/total
}
##
# This function is used by ghe-gc-disable, ghe-backup-repositories, and ghe-backup-storage
# This function should be used directly to disable and drain GC operations ONLY on HA-replica node
# (as done in ghe-backup-repositories and ghe-backup-storage)
# Otherwise use ghe-gc-disable which will call this function with the correct parameters.
#
# Arguments:
# $1 - path to sync-in-progress file ($SYNC_IN_PROGRESS_FILE)
# $2 - git cooldown period ($GHE_GIT_COOLDOWN_PERIOD)
#
# Exits 0 once no GC processes remain, 7 if they are still running after the
# cooldown period. Note: uses `exit` rather than `return`, so it must run in
# a dedicated shell (see header above for the intended callers).
##
gc_disable() {
set -e
local sync_in_progress="$1"
local git_cooldown_period="$2"
# Touch the sync-in-progress file, disabling GC operations, and wait for all
# active GC processes to finish on the remote side.
# (Created as the git user via sudo.)
sudo -u git touch "$sync_in_progress"
# Poll once per second, at most $git_cooldown_period times.
for _ in $(seq $git_cooldown_period); do
# note: the bracket synta[x] below is to prevent matches against the
# grep process itself.
if ps axo args | grep -E -e "^git( -.*)? nw-repac[k]( |$)" -e "^git( -.*)? g[c]( |$)" >/dev/null; then
sleep 1
else
exit 0
fi
done
# Cooldown expired with GC processes still running.
exit 7
}

Просмотреть файл

@ -1,58 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-es-audit-log
#/ Take a backup of audit logs in Elasticsearch.
#/
#/ Note: This command typically isn't called directly. It's invoked by
#/ ghe-backup.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

bm_start "$(basename "$0")"

# Set up remote host and root elastic backup directory based on config
host="$GHE_HOSTNAME"

# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$host"

# Make sure root backup dir exists if this is the first run
mkdir -p "$GHE_SNAPSHOT_DIR/audit-log"

# Ask Elasticsearch for each audit_log index name and its size in bytes.
if ! indices=$(ghe-ssh "$host" "curl -s \"localhost:9201/_cat/indices/audit_log*?h=index,pri.store.size&bytes=b\""); then
  log_error "ghe-backup-es-audit-log: Failed to retrieve audit log indices." 1>&2
  exit 1
fi

# Exit if no indices were found
[ -z "$indices" ] && exit

# Determine if the audit log migration has occurred or is needed.
if echo 'set -o pipefail; ! test -e /data/user/common/es-scan-complete && test -f /usr/local/share/enterprise/run-audit-log-transitions.sh' | ghe-ssh "$host" /bin/bash; then
  if echo 'set -o pipefail; echo n | /usr/local/share/enterprise/run-audit-log-transitions.sh > /dev/null 2>&1 && touch /data/user/common/es-scan-complete' | ghe-ssh "$host" /bin/bash; then
    # Quote all path expansions below: the previously-unquoted forms break
    # on data directories containing whitespace (SC2086).
    touch "$GHE_SNAPSHOT_DIR/es-scan-complete"
  fi
fi

IFS=$'\n'
for index in $indices; do
  # Each line is "<name> <size>"; word splitting separates the two fields.
  IFS=' '
  # shellcheck disable=SC2086 # splitting $index into positional params is intentional
  set $index
  index_name=$1
  index_size=$2

  if [[ -f "$GHE_DATA_DIR/current/audit-log/$index_name.gz" && $(cat "$GHE_DATA_DIR/current/audit-log/$index_name.gz.size" 2>/dev/null || true) -eq $index_size ]]; then
    ghe_verbose "* Linking unchanged audit log index: $index_name"
    # Hard link any indices that have not changed since the last backup
    ln "$GHE_DATA_DIR/current/audit-log/$index_name.gz" "$GHE_SNAPSHOT_DIR/audit-log/$index_name.gz"
    ln "$GHE_DATA_DIR/current/audit-log/$index_name.gz.size" "$GHE_SNAPSHOT_DIR/audit-log/$index_name.gz.size"
  else
    ghe_verbose "* Performing audit log export for index: $index_name"
    # Dump the index as gzipped JSON on the appliance, streaming it into the snapshot.
    echo "/usr/local/share/enterprise/ghe-es-dump-json \"http://localhost:9201/$index_name\" | gzip" | ghe-ssh "$host" -- /bin/bash > "$GHE_SNAPSHOT_DIR/audit-log/$index_name.gz"
    # Record the index size so the next run can detect unchanged indices.
    echo "$index_size" > "$GHE_SNAPSHOT_DIR/audit-log/$index_name.gz.size"
  fi
done

bm_end "$(basename "$0")"

Просмотреть файл

@ -1,86 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-es-rsync
#/ Take an online, incremental snapshot of Elasticsearch indices.
#/
#/ Note: This command typically isn't called directly. It's invoked by
#/ ghe-backup when the rsync strategy is used.
# shellcheck disable=SC2086
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
bm_start "$(basename $0)"
# Set up remote host and root elastic backup directory based on config
host="$GHE_HOSTNAME"
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$host"
# Verify rsync is available.
if ! rsync --version 1>/dev/null 2>&1; then
log_error "rsync not found." 1>&2
exit 1
fi
# Make sure root backup dir exists if this is the first run
mkdir -p "$GHE_SNAPSHOT_DIR/elasticsearch"
# Verify that the /data/elasticsearch directory exists.
if ! ghe-ssh "$host" -- "[ -d '$GHE_REMOTE_DATA_USER_DIR/elasticsearch' ]"; then
ghe_verbose "* The '$GHE_REMOTE_DATA_USER_DIR/elasticsearch' directory doesn't exist."
exit 0
fi
# If we have a previous increment, avoid transferring existing files via rsync's
# --link-dest support. This also decreases physical space usage considerably.
# ($link_dest must stay unquoted when passed to ghe-rsync below so an empty
# value expands to no argument at all -- hence the SC2086 disable up top.)
if [ -d "$GHE_DATA_DIR/current/elasticsearch" ]; then
link_dest="--link-dest=../../current/elasticsearch"
fi
# Transfer ES indices from a GitHub instance to the current snapshot
# directory, using a previous snapshot to avoid transferring files that have
# already been transferred.
ghe_verbose "* Performing initial sync of ES indices ..."
log_rsync "BEGIN elasticsearch rsync" 1>&3
ghe-rsync -av \
-e "ghe-ssh -p $(ssh_port_part "$host")" \
--rsync-path="sudo -u elasticsearch rsync" \
$link_dest \
"$(ssh_host_part "$host"):$GHE_REMOTE_DATA_USER_DIR/elasticsearch/" \
"$GHE_SNAPSHOT_DIR/elasticsearch" 1>&3
log_rsync "END elasticsearch rsync" 1>&3
# Set up a trap to re-enable flushing on exit and remove temp file
cleanup () {
ghe_verbose "* Enabling ES index flushing ..."
echo '{"index":{"translog.flush_threshold_size":"512MB"}}' |
ghe-ssh "$host" -- curl -s -XPUT "localhost:9200/_settings" -d @- >/dev/null
}
trap 'cleanup' EXIT
trap 'exit $?' INT # ^C always terminate
# Disable ES flushing and force a flush right now
# (raising the translog threshold to 1PB stops flushes during the second pass,
# keeping the on-disk index files stable for the follow-up rsync)
ghe_verbose "* Disabling ES index flushing ..."
echo '{"index":{"translog.flush_threshold_size":"1PB"}}' |
ghe-ssh "$host" -- curl -s -XPUT "localhost:9200/_settings" -d @- >/dev/null
ghe-ssh "$host" -- curl -s -XPOST "localhost:9200/_flush" >/dev/null
# Transfer all ES indices again
ghe_verbose "* Performing follow-up sync of ES indices ..."
log_rsync "BEGIN: elasticsearch followup rsync" 1>&3
ghe-rsync -av \
-e "ghe-ssh -p $(ssh_port_part "$host")" \
--rsync-path="sudo -u elasticsearch rsync" \
$link_dest \
"$(ssh_host_part "$host"):$GHE_REMOTE_DATA_USER_DIR/elasticsearch/" \
"$GHE_SNAPSHOT_DIR/elasticsearch" 1>&3
log_rsync "END: elasticsearch followup rsync" 1>&3
# "Backup" audit log migration sentinel file
if ghe-ssh "$host" -- "test -f $GHE_REMOTE_DATA_USER_DIR/common/es-scan-complete"; then
touch $GHE_SNAPSHOT_DIR/es-scan-complete
fi
bm_end "$(basename $0)"

Просмотреть файл

@ -1,91 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-fsck <snapshot-dir> [--print-nwo]
#/
#/ Run git fsck on backed up repositories.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
bm_start "$(basename $0)"
echo "Running git fsck on repos..."
# Verify git is available.
if ! git --version 1>/dev/null 2>&1; then
log_error "git not found." 1>&2
exit 1
fi
# $1: snapshot directory to verify; $2 (optional): --print-nwo flag.
sdir=$1
repos=0
errors=0
log=$(mktemp -t ghe-backup-fsck-XXXXXX)
t_start=$(date +%s)
# Prefer --no-dangling to cut down noise; fall back on old git versions.
if git fsck -h | grep -q '\-\-dangling'; then
git_cmd='git fsck --no-dangling'
else
log_warn "ghe-backup-fsck: old git version, --no-dangling not available" 1>&3
git_cmd='git fsck'
fi
if [ -z "$sdir" ] || [ ! -d "$sdir" ]; then
print_usage
fi
if [ ! -d "$sdir/repositories" ]; then
log_error "ghe-backup-fsck: $sdir is not a valid snapshot." >&2
exit 1
fi
# shellcheck disable=SC2044 # Snapshot and repository directory names are safe for find iteration.
for repo in $(find $sdir/repositories/ -type d -name \*.git); do
repos=$(($repos+1))
before_time=$(date +%s)
# Run each check in a subshell: the cd stays contained and any failure just
# falls through to the "ERROR" status line captured on stdout.
status=$(
set -e
cd $repo
nwo="-"
if [ "$2" = "--print-nwo" ] && [ -f info/nwo ]; then
nwo="$(cat info/nwo)"
fi
# Repos with no alternates file, or with relative ("..") alternates, can be
# checked as-is; otherwise point git at the network.git object store.
if [ ! -f objects/info/alternates ] || grep -q '^\.\.' objects/info/alternates; then
$git_cmd >$log 2>&1 && {
echo "OK $repo $nwo"; exit
}
else
GIT_ALTERNATE_OBJECT_DIRECTORIES=../network.git/objects $git_cmd >$log 2>&1 && {
echo "WARN $repo $nwo (alternates absolute path)"; exit
}
fi
echo "ERROR $repo $nwo"
)
elapsed_time=$(($(date +%s) - before_time))
# Surface anything that wasn't a quick OK (failures, or checks over 5s).
if [[ ! "$status" =~ ^OK ]] || [ $elapsed_time -gt 5 ]; then
echo "$status ${elapsed_time}s" 1>&3
[ -n "$GHE_VERBOSE" ] && cat $log
fi
case "$status" in
OK*)
;;
ERROR*)
errors=$(($errors+1))
;;
esac
done
log_info "* Repos verified: $repos, Errors: $errors, Took: $(($(date +%s) - $t_start))s"
rm -f $log
bm_end "$(basename $0)"

Просмотреть файл

@ -1,114 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-git-hooks
#/ Take an online, incremental snapshot of custom Git hooks configuration.
#/
#/ Note: This command typically isn't called directly. It's invoked by
#/ ghe-backup.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
bm_start "$(basename $0)"
# Verify rsync is available.
if ! rsync --version 1>/dev/null 2>&1; then
log_error "rsync not found." 1>&2
exit 1
fi
backup_dir="$GHE_SNAPSHOT_DIR/git-hooks"
# Location of last good backup for rsync --link-dest
backup_current="$GHE_DATA_DIR/current/git-hooks"
# Perform a host-check and establish GHE_REMOTE_XXX variables.
# NOTE(review): $host is only assigned below, so this runs with an
# empty/inherited value -- presumably ghe-host-check falls back to
# GHE_HOSTNAME in that case; confirm.
ghe_remote_version_required "$host"
# Split host:port into parts
port=$(ssh_port_part "$GHE_HOSTNAME")
host=$(ssh_host_part "$GHE_HOSTNAME")
# Add user / -l option
# ("user@host" keeps the user part; a bare hostname defaults to "admin")
user="${host%@*}"
[ "$user" = "$host" ] && user="admin"
hostnames=$host
ssh_config_file_opt=
tempdir=$(mktemp -d -t backup-utils-restore-XXXXXX)
opts="$GHE_EXTRA_SSH_OPTS"
# git server hostnames under cluster
if [ "$GHE_BACKUP_STRATEGY" = "cluster" ]; then
ssh_config_file="$tempdir/ssh_config"
ssh_config_file_opt="-F $ssh_config_file"
opts="$opts -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no"
hostnames=$(ghe-cluster-find-nodes "$GHE_HOSTNAME" "git-server")
ghe-ssh-config "$GHE_HOSTNAME" "$hostnames" > "$ssh_config_file"
fi
# Removes the remote sync-in-progress file on exit, re-enabling GC operations
# on the remote instance.
# NOTE(review): this cleanup only removes the local tempdir; the comment
# above looks copied from a sibling script.
cleanup() {
rm -rf $tempdir
}
trap 'cleanup' EXIT
trap 'exit $?' INT # ^C always terminate
# Transfer Git hooks data under subpath $2 from host(:port) $1 into the
# current snapshot, hard-linking unchanged files against the previous backup.
rsync_git_hooks_data () {
port=$(ssh_port_part "$1")
host=$(ssh_host_part "$1")
subpath=$2
shift 2
# If we have a previous increment and it is not empty, avoid transferring existing files via rsync's
# --link-dest support. This also decreases physical space usage considerably.
if [ -d "$backup_current/$subpath" ] && [ "$(ls -A $backup_current/$subpath)" ]; then
subdir="git-hooks/$subpath"
link_path=".."
# Build one "../" per path component so the destination-relative
# --link-dest path climbs from the snapshot's git-hooks/$subpath dir back
# up to the data dir, then down into current/git-hooks/$subpath.
while true; do
if [ "$(dirname $subdir)" = "." ]; then
break
fi
if [ "$(dirname $subdir)" = "/" ]; then
break
fi
link_path="../$link_path"
subdir=$(dirname $subdir)
done
local link_dest="--link-dest=../${link_path}/current/git-hooks/$subpath"
fi
# Ensure target directory exists, is needed with subdirectories
mkdir -p "$backup_dir/$subpath"
log_rsync "BEGIN: git-hooks sync" 1>&3
# $link_dest stays unquoted so an empty value expands to no argument.
ghe-rsync -av \
-e "ssh -q $opts -p $port $ssh_config_file_opt -l $user" $link_dest \
--rsync-path='sudo -u git rsync' \
"$host:$GHE_REMOTE_DATA_USER_DIR/git-hooks/$subpath/" \
"$backup_dir/$subpath" 1>&3
log_rsync "END: git-hooks sync" 1>&3
}
# Sync from the first hostname (the sole host, or the first git-server node
# in cluster mode). Each subtree is synced only if it exists on the remote.
hostname=$(echo $hostnames | awk '{ print $1; }')
if ghe-ssh $ssh_config_file_opt "$hostname:122" -- "sudo -u git [ -d '$GHE_REMOTE_DATA_USER_DIR/git-hooks/environments/tarballs' ]"; then
rsync_git_hooks_data $hostname:122 environments/tarballs
else
ghe_verbose "git-hooks environment tarballs not found. Skipping ..."
fi
if ghe-ssh $ssh_config_file_opt "$hostname:122" -- "sudo -u git [ -d '$GHE_REMOTE_DATA_USER_DIR/git-hooks/repos' ]"; then
rsync_git_hooks_data $hostname:122 repos
else
ghe_verbose "git-hooks repositories not found. Skipping ..."
fi
bm_end "$(basename $0)"

Просмотреть файл

@ -1,55 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-minio
#/ Take an online, incremental snapshot of all minio data
#/
#/ Note: This command typically isn't called directly. It's invoked by
#/ ghe-backup.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$(dirname "${BASH_SOURCE[0]}")/ghe-backup-config"
bm_start "$(basename "${0}")"
# Set up remote host and root backup snapshot directory based on config
port="$(ssh_port_part "${GHE_HOSTNAME}")"
host="$(ssh_host_part "${GHE_HOSTNAME}")"
backup_dir="${GHE_SNAPSHOT_DIR}/minio"
# Verify rsync is available.
if ! command -v rsync 1> /dev/null 2>&1; then
log_error "rsync not found." 1>&2
exit 1
fi
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "${host}"
# Make sure root backup dir exists if this is the first run
mkdir -p "${backup_dir}"
# If we have a previous increment and it is not empty, avoid transferring existing files via rsync's
# --link-dest support. This also decreases physical space usage considerably.
# Hilariously, this HAS to stay unquoted when you call `rsync` further
# down because when the shell interpolates this out, `rsync` will throw
# an absolute fit if this variable is quoted. Surprise!
if [[ -d "${GHE_DATA_DIR}/current/minio" ]] &&
[[ "$(ls -A "${GHE_DATA_DIR}/current/minio")" ]]; then
link_dest="--link-dest=${GHE_DATA_DIR}/current/minio"
fi
# Transfer all minio data from the user data directory using rsync.
# (.minio.sys is presumably MinIO-internal state rather than user data --
# it is deliberately excluded from the snapshot.)
ghe_verbose "* Transferring minio files from ${host} ..."
log_rsync "BEGIN: minio rsync" 1>&3
ghe-rsync \
--archive \
--verbose \
--rsh="ghe-ssh -p ${port}" \
--rsync-path='sudo -u minio rsync' \
--exclude=".minio.sys" \
${link_dest} \
"${host}:${GHE_REMOTE_DATA_USER_DIR}/minio/" \
"${GHE_SNAPSHOT_DIR}/minio" 1>&3
log_rsync "END: minio rsync" 1>&3
bm_end "$(basename "${0}")"

Просмотреть файл

@ -1,363 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-mssql
#/
#/
#/ Note: This command typically isn't called directly. It's invoked by
#/ ghe-backup.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Set up remote host and root backup snapshot directory based on config
backup_dir="$GHE_SNAPSHOT_DIR/mssql"
# State shared by the scheduling logic further down: previous snapshot's
# mssql dir, chosen backup type, and per-type expiry timestamps.
last_mssql=
backup_command=
backup_type=
full_expire=
diff_expire=
tran_expire=
# Check if the export tool is available in this version
export_tool_available() {
# GHE_TEST_REMOTE_VERSION is set by the test suite; skip the remote probe then.
if [ -z "$GHE_TEST_REMOTE_VERSION" ]; then
ghe_ssh_mssql "test -e /usr/local/bin/ghe-export-mssql"
else
# Always return available for test
return 0
fi
}
ghe_ssh_mssql() {
# Run a command on the MSSQL primary host (falls back to the main GHES host
# when no cluster mssql-master is configured; see setup further down).
ghe-ssh "${opts[@]}" "${ssh_config_file_opt[@]}" "$GHE_MSSQL_PRIMARY_HOST" "$@"
}
cleanup() {
# Remove the scratch directory holding the generated ssh_config.
rm -rf "$tempdir"
}
trap 'cleanup' EXIT INT
# use the mssql primary host if GHES cluster configuration contains a mssql-master or use the ghe server if the mssql-master is not available.
GHE_MSSQL_PRIMARY_NODE="$(ghe-ssh "$GHE_HOSTNAME" -- "ghe-config cluster.mssql-master" || true)"
GHE_MSSQL_PRIMARY_HOST="$(ghe-ssh "$GHE_HOSTNAME" -- "ghe-config cluster.$GHE_MSSQL_PRIMARY_NODE.hostname" || true)"
if [ -z "$GHE_MSSQL_PRIMARY_HOST" ]; then
GHE_MSSQL_PRIMARY_HOST="$GHE_HOSTNAME"
fi
tempdir=$(mktemp -d -t backup-utils-backup-XXXXXX)
ssh_config_file_opt=()
opts=()
isHA="$(ghe-ssh "$GHE_HOSTNAME" -- "ghe-config cluster.ha" || true)"
# get server hostnames under cluster and HA
if [ "$GHE_BACKUP_STRATEGY" = "cluster" ] || [ "$isHA" = "true" ] ; then
ssh_config_file="$tempdir/ssh_config"
ssh_config_file_opt=("-F" "$ssh_config_file")
opts=("-o" "UserKnownHostsFile=/dev/null" "-o" "StrictHostKeyChecking=no" "-o" "PasswordAuthentication=no")
# Write the ssh_config used by ghe_ssh_mssql to reach the primary node.
ghe-ssh-config "$GHE_HOSTNAME" "$GHE_MSSQL_PRIMARY_HOST" > "$ssh_config_file"
fi
# Refuse to run against appliances that lack the mssql export tooling.
if ! export_tool_available ; then
log_error "ghe-export-mssql is not available" 1>&2
exit 1
fi
add_minute() {
  # Add $2 minutes to a yyyymmddTHHMMSS timestamp ($1), printing the shifted
  # timestamp in the same format (UTC). Negative offsets subtract.
  local stamp="$1" minutes="$2"
  if date -v -1d > /dev/null 2>&1; then
    # BSD date (macOS): -v adjusts, -jf parses without setting the clock.
    date -v +"$minutes"M -ujf'%Y%m%dT%H%M%S' "$stamp" +%Y%m%dT%H%M%S
  else
    # GNU date: splice the stamp into "yyyymmdd HH:MM:SS N minutes" for -d.
    date -u '+%Y%m%dT%H%M%S' -d "${stamp:0:8} ${stamp:9:2}:${stamp:11:2}:${stamp:13:2} $minutes minutes"
  fi
}
find_timestamp() {
  # Extract the yyyymmddTHHMMSS stamp from a backup filename shaped like
  # "<db>@<timestamp>.<ext>". Prints an empty line when no '@' is present.
  local base="${1##*/}" stamp=""
  if [[ "$base" == *@* ]]; then
    stamp="${base#*@}"    # text after the first '@'
    stamp="${stamp%%@*}"  # ...up to any second '@'
    stamp="${stamp:0:15}" # the timestamp is exactly 15 characters
  fi
  echo "$stamp"
}
actions_dbs() {
# List user databases on the MSSQL instance, one per line, excluding the
# system databases and any name containing characters outside [a-zA-Z0-9_-].
all_dbs=$(echo 'set -o pipefail; ghe-mssql-console -y -n -q "SET NOCOUNT ON; SELECT name FROM sys.databases"' | ghe_ssh_mssql /bin/bash)
for db in $all_dbs; do
if [[ ! "$db" =~ ^(master|tempdb|model|msdb)$ ]] && [[ "$db" =~ ^[a-zA-Z0-9_-]+$ ]]; then
echo "$db"
fi
done
}
# Verify every backup file under directory $1 can be traced back to a
# database that currently exists on the remote; exit 1 otherwise.
ensure_same_dbs() {
# Collect all local backup filenames (.bak/.diff/.log).
locals=()
while read -r file; do
filename=$(basename "$file")
locals+=("$filename")
done < <(find "$1" \( -name "*.bak" -o -name "*.diff" -o -name "*.log" \))
# Remove from the list every file whose name starts with a remote DB name.
for remote in $(actions_dbs); do
remaining_locals=()
for local in "${locals[@]}"; do
if ! [[ "$local" == "$remote"* ]]; then
remaining_locals+=("$local")
fi
done
locals=("${remaining_locals[@]}")
done
# Anything left over is unaccounted for -- refuse to continue.
if [[ "${#locals[@]}" -ne 0 ]]; then
log_warn "Warning: Found following ${#locals[@]} backup files that can't be traced back to the specified GHES host."
log_warn "Warning: Did you recently reconfigure the GHES host? Move or delete these backup files if no longer needed."
for local in "${locals[@]}"; do
ghe_verbose "$1/$local"
done
exit 1
fi
}
run_query() {
# Run a T-SQL query ($1) through ghe-mssql-console on the MSSQL host and
# print the result with surrounding whitespace trimmed.
echo "set -o pipefail; ghe-mssql-console -y -n -q \"SET NOCOUNT ON; $1\"" | ghe_ssh_mssql /bin/bash | sed 's/^[[:space:]]*//;s/[[:space:]]*$//'
}
get_latest_backup_file() {
  # Print the basename of the newest "$db*.$ext" backup file under $1, or
  # nothing when no timestamped file of that type exists. Timestamps are
  # zero-padded UTC (yyyymmddTHHMMSS), so lexical order is chronological.
  local backups_dir="$1" db="$2" ext="$3" newest
  newest=$(find "$backups_dir" -type f -name "$db*.$ext" | grep -E '[0-9]{8}T[0-9]{6}' | sort -r | head -n 1)
  echo "${newest##*/}"
}
get_backup_val() {
# Look up column $3 from the msdb backup history for database $1, matching
# the backup set whose media file name ends with filename $2.
db=$1
filename=$2
column=$3
run_query "
SELECT s.$column
FROM msdb.dbo.backupset s
JOIN msdb.dbo.backupmediafamily f
ON s.media_set_id = f.media_set_id
WHERE s.database_name = '$db' AND f.physical_device_name LIKE '%$filename'"
}
get_backup_checkpoint_lsn() {
# Checkpoint LSN recorded for backup file $2 of database $1.
get_backup_val "$1" "$2" "checkpoint_lsn"
}
get_backup_last_lsn() {
# Last LSN recorded for backup file $2 of database $1.
get_backup_val "$1" "$2" "last_lsn"
}
# Query the starting LSN that the next transaction-log backup of database $1
# would begin from.
get_next_log_backup_starting_lsn() {
db=$1
# last_log_backup_lsn: The starting log sequence number of the next log backup
# https://docs.microsoft.com/en-us/sql/relational-databases/system-catalog-views/sys-database-recovery-status-transact-sql
run_query "
SELECT last_log_backup_lsn
FROM sys.database_recovery_status drs
JOIN sys.databases db on drs.database_id = db.database_id
WHERE db.name = '$db'"
}
# Query the base LSN that the next differential backup of database $1 would
# be taken against.
get_next_diff_backup_base_lsn() {
db=$1
# differential_base_lsn: Base for differential backups. Data extents changed after this LSN will be included in a differential backup.
# https://docs.microsoft.com/en-us/sql/relational-databases/system-catalog-views/sys-master-files-transact-sql
run_query "
SELECT differential_base_lsn
FROM sys.master_files mf
WHERE mf.name = '$db'"
}
# Decide which kind of MSSQL backup to take on this run (full / diff /
# transaction) based on the previous snapshot's contents, the configured
# backup cadence, and the LSN chain state reported by the appliance.
last_mssql=$GHE_DATA_DIR/current/mssql
# No previous snapshot directory, or no full (.bak) backup in it: a full
# backup is the only valid starting point.
if [ ! -d "$last_mssql" ] \
|| [ -z "$(find "$last_mssql" -type f -name '*.bak' | head -n 1)" ]; then
ghe_verbose "Taking first full backup"
backup_type="full"
else
ensure_same_dbs "$last_mssql"
# Check schedule to determine backup type
# GHE_MSSQL_BACKUP_CADENCE is comma-separated: full,diff,transaction expiry
# intervals in minutes (cadence[0], cadence[1], cadence[2]).
IFS=',' read -ra cadence <<< "$GHE_MSSQL_BACKUP_CADENCE"
current=$(date -u +%Y%m%d%H%M%S)
# find_timestamp / add_minute are helpers defined elsewhere in this file;
# presumably they extract the backup file's embedded timestamp and add a
# minute offset to it — verify against their definitions.
full=$(find "$last_mssql" -type f -name "*.bak" | head -n 1)
full=$(find_timestamp "$full")
full_expire=$(add_minute "$full" "${cadence[0]}")
# Strip the ISO-8601 'T' separator so the value compares numerically
# against $current (YYYYmmddHHMMSS).
full_expire="${full_expire//T}"
diff=$(find "$last_mssql" -type f -name "*.diff" | head -n 1)
if [ -f "$diff" ]; then
diff=$(find_timestamp "$diff")
diff_expire=$(add_minute "$diff" "${cadence[1]}")
diff_expire="${diff_expire//T}"
else
# No previous diff backup: base the diff expiry on the full backup's time.
diff_expire=$(add_minute "$full" "${cadence[1]}")
diff_expire="${diff_expire//T}"
fi
# Use the most recent transaction log backup (files carry a
# YYYYmmddTHHMMSS timestamp in their names).
tran=$(find "$last_mssql" -type f -name "*.log" | grep -E '[0-9]{8}T[0-9]{6}' | sort | tail -1)
tran=$(find_timestamp "$tran")
tran_expire=$(add_minute "$tran" "${cadence[2]}")
tran_expire="${tran_expire//T}"
ghe_verbose "current $current, full expire $full_expire, \
diff expire $diff_expire, tran expire $tran_expire"
# Determine the type of backup to take based on expiry time
# Note: if none of the expiry times have passed, backup_type stays unset and
# no backup is taken this run (see the [ -n "$backup_type" ] guard below).
if [ "$current" -gt "$full_expire" ]; then
backup_type='full'
elif [ "$current" -gt "$diff_expire" ]; then
backup_type='diff'
elif [ "$current" -gt "$tran_expire" ]; then
backup_type='transaction'
fi
# Upgrade to a full backup if the diff/transaction backup might not be restorable due to other backup mechanisms interfering
# with the transaction LSN chain or differential base LSN.
if [ "$backup_type" == 'diff' ] || [ "$backup_type" == 'transaction' ]; then
ghe_verbose "Checking for conflicting backups to ensure a $backup_type backup is sufficient"
for db in $(actions_dbs); do
# Ensure that a diff backup will be based on the full backup file we have (rather than one another backup mechanism took)
if [ "$backup_type" == 'diff' ]; then
full_backup_file=$(get_latest_backup_file "$last_mssql" "$db" "bak")
if [[ "$full_backup_file" == "" ]]; then
log_warn "Taking a full backup instead of a diff backup because for $db a full backup file wasn't found"
backup_type="full"
break
fi
full_backup_file_checkpoint_lsn=$(get_backup_checkpoint_lsn "$db" "$full_backup_file")
if [[ "$full_backup_file_checkpoint_lsn" = "NULL" ]] || [[ "$full_backup_file_checkpoint_lsn" == "" ]]; then
log_warn "Taking a full backup instead of a diff backup because for $db the checkpoint LSN for $full_backup_file couldn't be determined"
backup_type="full"
break
fi
next_diff_backup_base_lsn=$(get_next_diff_backup_base_lsn "$db")
if [[ "$next_diff_backup_base_lsn" = "NULL" ]] || [[ "$next_diff_backup_base_lsn" == "" ]]; then
log_warn "Taking a full backup instead of a $backup_type backup because for $db the base LSN for the next diff backup couldn't be determined"
backup_type="full"
break
fi
# The base of the diff backup we're about to take must exactly match the checkpoint LSN of the full backup file we have
if [[ "$next_diff_backup_base_lsn" -ne "$full_backup_file_checkpoint_lsn" ]]; then
log_warn "Taking a full backup instead of a $backup_type backup because for $db the diff would have base LSN $next_diff_backup_base_lsn yet our full backup has checkpoint LSN $full_backup_file_checkpoint_lsn"
backup_type="full"
break
fi
fi
# Ensure that a transaction log backup will immediately follow the previous one
latest_log_backup_file=$(get_latest_backup_file "$last_mssql" "$db" "log")
if [[ "$latest_log_backup_file" == "" ]]; then
log_warn "Taking a full backup instead of a $backup_type backup because for $db a previous transaction log backup wasn't found"
backup_type="full"
break
fi
latest_log_backup_last_lsn=$(get_backup_last_lsn "$db" "$latest_log_backup_file")
if [[ "$latest_log_backup_last_lsn" = "NULL" ]] || [[ "$latest_log_backup_last_lsn" == "" ]]; then
log_warn "Taking a full backup instead of a $backup_type backup because for $db the LSN range for $latest_log_backup_file couldn't be determined"
backup_type="full"
break
fi
next_log_backup_starting_lsn=$(get_next_log_backup_starting_lsn "$db")
if [[ "$next_log_backup_starting_lsn" = "NULL" ]] || [[ "$next_log_backup_starting_lsn" == "" ]]; then
log_warn "Taking a full backup instead of a $backup_type backup because for $db the starting LSN for the next log backup couldn't be determined"
backup_type="full"
break
fi
# The starting LSN of the backup we're about to take must be equal to (or before) the last LSN from the last backup,
# otherwise there'll be a gap and the logfiles won't be restorable
if [[ "$next_log_backup_starting_lsn" -gt "$latest_log_backup_last_lsn" ]]; then
log_warn "Taking a full backup instead of a $backup_type backup because for $db a gap would exist between the last backup ending at LSN $latest_log_backup_last_lsn and next backup starting at $next_log_backup_starting_lsn"
backup_type="full"
break
fi
done
fi
fi
# Make sure root backup dir exists if this is the first run
mkdir -p "$backup_dir"

# Hard-link the still-restorable backup files from the previous snapshot into
# the new snapshot directory; hard links cost neither disk space nor copy time.
if [ -d "$last_mssql" ]; then
  for previous_backup in "$last_mssql"/*; do
    # An unmatched glob expands to the literal pattern; nothing to carry over.
    [[ -e "$previous_backup" ]] || break
    filename="${previous_backup##*/}"
    extension="${filename##*.}"
    transfer=
    case "$extension" in
      bak)
        # Keep the previous full backup unless we're taking a new full backup.
        if [ "$backup_type" != 'full' ]; then
          transfer=1
        fi
        ;;
      diff|log)
        # Keep diff and transaction log backups only when the new backup is
        # neither full nor diff.
        if [ "$backup_type" != 'full' ] && [ "$backup_type" != 'diff' ]; then
          transfer=1
        fi
        ;;
    esac
    if [ -n "$transfer" ]; then
      ghe_verbose "Creating hard link to $filename"
      ln "$last_mssql/$filename" "$backup_dir/$filename"
    fi
  done
fi
# Run the chosen backup on the appliance and pull the resulting files back to
# the backup host. If no expiry was reached above, backup_type is empty and
# this whole section is skipped.
if [ -n "$backup_type" ]; then
ghe_verbose "Taking $backup_type backup"
# ghe-export-mssql runs remotely: no flag = full, -d = differential,
# -t = transaction log backup.
backup_command='ghe-export-mssql'
if [ "$backup_type" = "diff" ]; then
backup_command='ghe-export-mssql -d'
elif [ "$backup_type" = "transaction" ]; then
backup_command='ghe-export-mssql -t'
fi
backup_failed=
bm_start "$(basename "$0")"
# record if generating the backup failed, this will allow us to collect any backups that may have been produced, even if they are not complete they are better than nothing
ghe_ssh_mssql -- "$backup_command" || backup_failed='true'
bm_end "$(basename "$0")"
# Configure the backup cadence on the appliance, which is used for diagnostics
ghe_ssh_mssql "ghe-config mssql.backup.cadence $GHE_MSSQL_BACKUP_CADENCE"
# Transfer backup files from appliance to backup host
appliance_dir="$GHE_REMOTE_DATA_DIR/user/mssql/backups"
# List remote backup files (if the directory exists) via a remote shell;
# sudo is required to read the mssql backup directory.
backups=$(echo "set -o pipefail; if sudo test -d \"$appliance_dir\"; then \
sudo ls \"$appliance_dir\"; fi" | ghe_ssh_mssql /bin/bash)
for b in $backups
do
ghe_verbose "Transferring to backup host $b"
ghe_ssh_mssql "sudo cat $appliance_dir/$b" > "$backup_dir"/"$b"
done
# Fail the run only after all produced files were collected (see note above).
if [ -n "$backup_failed" ]; then
log_error 'ghe-export-mssql failed to backup at least one mssql database' 1>&2
exit 1
fi
fi

Просмотреть файл

@ -1,48 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-mysql <host>
#/ Backup MySQL from a GitHub instance.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-backup command.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

bm_start "$(basename "$0")"

# Perform a host-check and establish the remote version in GHE_REMOTE_VERSION.
ghe_remote_version_required "$GHE_HOSTNAME"

if is_external_database_target; then
  if [ -n "$EXTERNAL_DATABASE_BACKUP_SCRIPT" ]; then
    log_info "Backing up external MySQL database using customer-provided script..."
    # Intentionally unquoted so a value such as "/path/to/script --flag"
    # word-splits into a command plus its arguments.
    $EXTERNAL_DATABASE_BACKUP_SCRIPT
    bm_end "$(basename "$0")"
    exit 0
  else
    if is_binary_backup_feature_on; then
      log_warn "Binary backups are configured on the target environment."
      log_warn "Binary backup is not supported with an external MySQL database. Backing up using logical backup strategy. Please disable binary backups with 'ghe-config mysql.backup.binary false', or provide a custom backup script using EXTERNAL_DATABASE_BACKUP_SCRIPT"
    fi
    ghe-backup-mysql-logical
  fi
else
  if is_binary_backup_feature_on; then
    ghe-backup-mysql-binary
  else
    # if incremental backups are turned on, we can't do them with
    # logical backups, so we need to tell the user and exit
    is_inc=$(is_incremental_backup_feature_on)
    # Quoted: an unquoted, empty $is_inc would turn this into `[ = true ]`,
    # a test(1) syntax error that aborts the script under `set -e`.
    if [ "$is_inc" = true ]; then
      log_warn "Incremental backups are configured on the target environment."
      log_warn "Incremental backup is not supported with a logical MySQL backup. Please disable incremental backups with 'ghe-config mysql.backup.incremental false'"
      exit 1
    fi
    ghe-backup-mysql-logical
  fi
fi

bm_end "$(basename "$0")"

Просмотреть файл

@ -1,60 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-mysql-binary <host>
#/ Backup MySQL from a GitHub instance using binary backup strategy.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-backup command.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

bm_start "$(basename "$0")"

# Perform a host-check and establish the remote version in GHE_REMOTE_VERSION.
ghe_remote_version_required "$GHE_HOSTNAME"

log_verbose "Backing up MySQL database using binary backup strategy ..."

is_inc=$(is_incremental_backup_feature_on)
# Quoted: an unquoted, empty $is_inc would make this `[ = true ]`, a test(1)
# syntax error that aborts the script under `set -e`.
if [ "$is_inc" = true ]; then
  log_verbose "Incremental backups are configured on the target environment."
  log_info "Performing incremental backup of MySQL database ..." 1>&3

  # full_or_incremental_backup (defined in ghe-backup-config) decides whether
  # this run needs a fresh full base or can be incremental on top of it.
  INC_TYPE=$(full_or_incremental_backup)
  INC_LSN=""
  if [ "$INC_TYPE" == "full" ]; then
    log_info "Incremental backup type: $INC_TYPE" 1>&3
    INC_LSN=0 # 0 means full backup
  else
    validate_inc_snapshot_data
    log_info "Incremental backup type: $INC_TYPE" 1>&3
    INC_LSN=$(retrieve_last_lsn)
  fi

  # INC_BACKUP tells the remote ghe-export-mysql which LSN to start from.
  echo "set -o pipefail; env INC_BACKUP=$INC_LSN ghe-export-mysql" |
  ghe-ssh "$GHE_HOSTNAME" -- /bin/bash > "$GHE_SNAPSHOT_DIR/mysql.sql.gz"
  echo "NO_ADDITIONAL_COMPRESSION" > "$GHE_SNAPSHOT_DIR/mysql-binary-backup-sentinel"

  # Ensure that we capture the xtrabackup_checkpoints file from the remote host
  log_info "Checking if incremental backup is part of a cluster"
  GET_LSN=$(get_cluster_lsn "$GHE_HOSTNAME")
  ghe-ssh "$GHE_HOSTNAME" "$GET_LSN" > "$GHE_SNAPSHOT_DIR/xtrabackup_checkpoints"

  if [ "$INC_TYPE" == "full" ]; then
    log_info "Adding $GHE_SNAPSHOT_DIR to the list of full backups" 1>&3
    update_inc_full_backup "$GHE_SNAPSHOT_DIR"
  else
    log_info "Adding $GHE_SNAPSHOT_DIR to the list of incremental backups" 1>&3
    update_inc_snapshot_data "$GHE_SNAPSHOT_DIR"
  fi

  bm_end "$(basename "$0")"
  exit 0
fi

# if incremental backup isn't enabled, or we are performing a full backup as part of the process,
# fall through and do a full backup
echo "set -o pipefail; ghe-export-mysql" |
ghe-ssh "$GHE_HOSTNAME" -- /bin/bash > "$GHE_SNAPSHOT_DIR/mysql.sql.gz"
echo "NO_ADDITIONAL_COMPRESSION" > "$GHE_SNAPSHOT_DIR/mysql-binary-backup-sentinel"

is_inc=$(is_incremental_backup_feature_on)
if [ "$is_inc" = true ]; then
  update_inc_full_backup "$GHE_SNAPSHOT_DIR"
fi

bm_end "$(basename "$0")"

Просмотреть файл

@ -1,27 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-mysql-logical <host>
#/ Backup MySQL from a GitHub instance using logical backup strategy.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-backup command.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# "$0" is quoted for consistency with the other backup scripts and to survive
# paths containing whitespace.
bm_start "$(basename "$0")"

# Perform a host-check and establish the remote version in GHE_REMOTE_VERSION.
ghe_remote_version_required "$GHE_HOSTNAME"

log_verbose "Backing up MySQL database using logical backup strategy ..."

# Dump and compress on the appliance; stream the gzip output to the snapshot.
echo "set -o pipefail; ghe-export-mysql | pigz" |
ghe-ssh "$GHE_HOSTNAME" -- /bin/bash > "$GHE_SNAPSHOT_DIR/mysql.sql.gz"

# Sentinel marks this snapshot as a logical dump of an external database so
# restore tooling can tell the strategies apart.
if is_external_database_target; then
  echo "LOGICAL_EXTERNAL_BACKUP" > "$GHE_SNAPSHOT_DIR/logical-external-database-backup-sentinel"
fi

bm_end "$(basename "$0")"

Просмотреть файл

@ -1,89 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-pages
#/ Take an online, incremental snapshot of all Pages data
#/
#/ Note: This command typically isn't called directly. It's invoked by
#/ ghe-backup.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

bm_start "$(basename "$0")"

# Set up remote host and root backup snapshot directory based on config
host="$GHE_HOSTNAME"
backup_dir="$GHE_SNAPSHOT_DIR/pages"

# Verify rsync is available.
if ! rsync --version 1>/dev/null 2>&1; then
  log_error "rsync not found." 1>&2
  exit 1
fi

# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$host"

# Split host:port into parts
port=$(ssh_port_part "$GHE_HOSTNAME")
host=$(ssh_host_part "$GHE_HOSTNAME")

# Add user / -l option
user="${host%@*}"
[ "$user" = "$host" ] && user="admin"

hostnames=$host
ssh_config_file_opt=
tempdir=$(mktemp -d -t backup-utils-restore-XXXXXX)
opts="$GHE_EXTRA_SSH_OPTS"

# Pages server hostnames under cluster
if [ "$GHE_BACKUP_STRATEGY" = "cluster" ]; then
  ssh_config_file="$tempdir/ssh_config"
  ssh_config_file_opt="-F $ssh_config_file"
  opts="$opts -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no"
  hostnames=$(ghe-cluster-find-nodes "$GHE_HOSTNAME" "pages-server")
  ghe-ssh-config "$GHE_HOSTNAME" "$hostnames" > "$ssh_config_file"
fi

# Make sure root backup dir exists if this is the first run
mkdir -p "$backup_dir"

# Remove the local temporary directory on exit.
cleanup() {
  # Quoted so a TMPDIR containing whitespace can't split into a stray
  # `rm -rf` argument.
  rm -rf "$tempdir"
}
trap 'cleanup' EXIT INT

# If we have a previous increment and it is not empty, avoid transferring existing files via rsync's
# --link-dest support. This also decreases physical space usage considerably.
if [ -d "$GHE_DATA_DIR/current/pages" ] && [ "$(ls -A "$GHE_DATA_DIR/current/pages")" ]; then
  link_dest="--link-dest=../../current/pages"
fi

count=0
for hostname in $hostnames; do
  bm_start "$(basename "$0") - $hostname"
  echo 1>&3
  ghe_verbose "* Starting backup for host: $hostname"
  echo 1>&3
  ghe_verbose "* Transferring pages files ..."
  log_rsync "BEGIN: pages rsync" 1>&3
  # Transfer all data from the user data directory using rsync.
  # $link_dest is intentionally unquoted: it is empty on first runs and must
  # vanish rather than become an empty argument.
  ghe-rsync -av \
    -e "ssh -q $opts -p $port $ssh_config_file_opt -l $user" \
    --rsync-path='sudo -u git rsync' \
    $link_dest \
    "$hostname:$GHE_REMOTE_DATA_USER_DIR/pages/" \
    "$GHE_SNAPSHOT_DIR/pages" 1>&3
  log_rsync "END: pages rsync" 1>&3
  bm_end "$(basename "$0") - $hostname"
  count=$((count + 1))
done

increment-progress-total-count "$count"

bm_end "$(basename "$0")"
Просмотреть файл

@ -1,55 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-redis
#/ Take a snapshot of all Redis data. This is needed because older versions of
#/ the remote side ghe-export-redis command use a blocking SAVE instead of a
#/ non-blocking BGSAVE.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-backup command.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
bm_start "$(basename $0)"
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$GHE_HOSTNAME"
# Force a redis BGSAVE, and wait for it to complete.
# The heredoc below is executed remotely by /bin/bash; every \$ is escaped so
# that expansion happens on the appliance, while $GHE_REMOTE_DATA_USER_DIR is
# expanded locally before the script is sent. The resulting RDB dump is
# streamed over SSH to this script's stdout.
ghe-ssh "$GHE_HOSTNAME" /bin/bash <<EOF
set -e
if which ghe-redis-cli > /dev/null; then
redis_cli=ghe-redis-cli
redis_arg=--remote
else
redis_cli=redis-cli
redis_arg=
fi
redis_host=\$(ghe-config cluster.redis-master 2>/dev/null || echo "localhost")
timestamp=\$(\$redis_cli \$redis_arg -h \$redis_host LASTSAVE)
for i in \$(seq 10); do
if ! \$redis_cli \$redis_arg -h \$redis_host BGSAVE | grep -q ERR; then
break
fi
sleep 15
done
for n in \$(seq 3600); do
if [ "\$(\$redis_cli \$redis_arg -h \$redis_host LASTSAVE)" != "\$timestamp" ]; then
break
fi
sleep 1
done
[ "\$(\$redis_cli \$redis_arg -h \$redis_host LASTSAVE)" != "\$timestamp" ] # exits 1 if bgsave didn't work
if [ "\$redis_host" != "localhost" ]; then
ssh \$redis_host sudo cat '$GHE_REMOTE_DATA_USER_DIR/redis/dump.rdb'
else
sudo cat '$GHE_REMOTE_DATA_USER_DIR/redis/dump.rdb'
fi
EOF
bm_end "$(basename $0)"

Просмотреть файл

@ -1,407 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-repositories
#/ Take an online, incremental snapshot of all Git repository data.
#/
#/ Note: This command typically isn't called directly. It's invoked by
#/ ghe-backup.
set -e
# This command is designed to allow for transferring active Git repository data
# from a GitHub instance to a backup site in a way that ensures data is
# captured in a consistent state even when being written to.
#
# - All Git GC operations are disabled on the GitHub instance for the duration of
# the backup. This removes the possibly of objects or packs being removed
# while the backup is in progress.
#
# - In progress Git GC operations are given a cooldown window to complete. The
# script will sleep for up to 60 seconds waiting for GC operations to finish.
#
# - Git repository data is transferred in a specific order: auxiliary files,
# packed refs, loose refs, reflogs, and finally objects and pack files in that
# order. This ensures that all referenced objects are captured.
#
# - Git GC operations are re-enabled on the GitHub instance.
#
# The script uses multiple runs of rsync to transfer repository files. Each run
# includes a list of filter rules that ensure only specific types of files are
# transferred.
#
# See the "FILTER RULES" and "INCLUDE/EXCLUDE PATTERN RULES" sections of the
# rsync(1) manual for more information:
# <http://rsync.samba.org/ftp/rsync/rsync.html>
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
bm_start "$(basename $0)"
# Set up remote host and root backup snapshot directory based on config
host="$GHE_HOSTNAME"
backup_dir="$GHE_SNAPSHOT_DIR/repositories"
# Location of last good backup for rsync --link-dest
backup_current="$GHE_DATA_DIR/current/repositories"
# Verify rsync is available.
if ! rsync --version 1>/dev/null 2>&1; then
log_error "rsync not found." 1>&2
exit 1
fi
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$host"
# Split host:port into parts
port=$(ssh_port_part "$GHE_HOSTNAME")
host=$(ssh_host_part "$GHE_HOSTNAME")
# Add user / -l option
user="${host%@*}"
[ "$user" = "$host" ] && user="admin"
hostnames=$host
ssh_config_file_opt=
# Local scratch dir for route lists and (under cluster) the generated ssh
# config; a matching scratch dir is created on the appliance.
tempdir=$(mktemp -d -t backup-utils-backup-XXXXXX)
remote_tempdir=$(ghe-ssh "$GHE_HOSTNAME" -- mktemp -d -t backup-utils-backup-XXXXXX)
routes_list=$tempdir/routes_list
remote_routes_list=$remote_tempdir/remote_routes_list
opts="$GHE_EXTRA_SSH_OPTS"
# git server hostnames under cluster
if [ "$GHE_BACKUP_STRATEGY" = "cluster" ]; then
ssh_config_file="$tempdir/ssh_config"
ssh_config_file_opt="-F $ssh_config_file"
opts="$opts -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no"
hostnames=$(ghe-cluster-find-nodes "$GHE_HOSTNAME" "git-server")
ghe-ssh-config "$GHE_HOSTNAME" "$hostnames" > "$ssh_config_file"
fi
# Replica hostnames for HA
if ghe-ssh "$GHE_HOSTNAME" -- "[ -f '$GHE_REMOTE_ROOT_DIR/etc/github/repl-state' ]"; then
ha_replica_hosts=$(ghe-ssh "$GHE_HOSTNAME" ghe-cluster-nodes --replica)
fi
# Make sure root backup dir exists if this is the first run
mkdir -p "$backup_dir"
# Removes the remote sync-in-progress file on exit, re-enabling GC operations
# on the remote instance. Also kills any in-flight background sync jobs and
# cleans up both the local and remote scratch directories.
cleanup() {
for pid in $(jobs -p); do
kill -KILL $pid > /dev/null 2>&1 || true
done
# Enable remote GC operations
for hostname in $hostnames; do
ghe-gc-enable $ssh_config_file_opt $hostname:$port || {
echo "Re-enable gc on $hostname failed, please manually delete $SYNC_IN_PROGRESS_FILE" 1>&2
}
done
# Enable remote GC operations for HA replica
for replica_host in $ha_replica_hosts; do
echo "set -o pipefail; ssh $replica_host -- 'sudo rm -f $SYNC_IN_PROGRESS_FILE'" | ghe-ssh "$host" /bin/bash || {
echo "Re-enable gc on $replica_host failed, please manually delete $SYNC_IN_PROGRESS_FILE" 1>&2
}
done
ghe-ssh "$GHE_HOSTNAME" -- rm -rf $remote_tempdir
rm -rf $tempdir
}
trap 'cleanup' EXIT INT
# Disable remote GC operations
for hostname in $hostnames; do
ghe-gc-disable $ssh_config_file_opt $hostname:$port
done
# Disable remote GC operations for HA replica
# gc_disable is a function defined in ghe-backup-config
# gc_disable is called on the replica node via the primary node, because replica node is not expected to be reachable from backup host. But replica node is expected to be reachable from primary node.
for replica_host in $ha_replica_hosts; do
echo "set -o pipefail; ssh $replica_host -- '$(declare -f gc_disable); gc_disable \"$SYNC_IN_PROGRESS_FILE\" \"$GHE_GIT_COOLDOWN_PERIOD\"'" | ghe-ssh "$host" /bin/bash || {
echo "Disable gc on $replica_host failed" 1>&2
}
done
# If we have a previous increment, avoid transferring existing files via rsync's
# --link-dest support. This also decreases physical space usage considerably.
if [ -d "$backup_current" ]; then
link_dest="--link-dest=../../current/repositories"
fi
# Calculate sync routes. This will store the healthy repo paths for each node
#
# This gets a repo path and stores the path in the $node.sync file
# a/nw/a8/3f/02/100000855 dgit-node1 >> dgit-node1.sync
# a/nw/a8/bc/8d/100000880 dgit-node3 >> dgit-node3.sync
# a/nw/a5/06/81/100000659 dgit-node2 >> dgit-node2.sync
# ...
# One route per line.
#
# NOTE: The route generation is performed on the appliance as it is considerably
# more performant than performing over an SSH pipe.
#
bm_start "$(basename $0) - Generating routes"
echo "github-env ./bin/dgit-cluster-backup-routes > $remote_routes_list" | ghe-ssh "$GHE_HOSTNAME" -- /bin/bash
ghe-ssh "$GHE_HOSTNAME" -- cat $remote_routes_list | ghe_debug
bm_end "$(basename $0) - Generating routes"
bm_start "$(basename $0) - Fetching routes"
# Compress the route list in transit; it can be large on big instances.
ghe-ssh "$GHE_HOSTNAME" -- gzip -c $remote_routes_list | gzip -d > $routes_list
< $routes_list ghe_debug
bm_end "$(basename $0) - Fetching routes"
bm_start "$(basename $0) - Processing routes"
if [ "$GHE_BACKUP_STRATEGY" != "cluster" ]; then
server=$host
fi
# Fan the route list out into one $tempdir/<host>.rsync file per source node;
# outside cluster mode every route is written to the single server's file.
< $routes_list awk -v tempdir="$tempdir" -v server="$server" '{ for(i=2;i<=NF;i++){ server != "" ? host=server : host=$i; print $1 > (tempdir"/"host".rsync") }}'
ghe_debug "\n$(find "$tempdir" -maxdepth 1 -name '*.rsync')"
bm_end "$(basename $0) - Processing routes"
if [ -z "$(find "$tempdir" -maxdepth 1 -name '*.rsync')" ]; then
log_warn "no routes found, skipping repositories backup ..."
exit 0
else
increment-progress-total-count 3
fi
# Transfer repository data from a GitHub instance to the current snapshot
# directory, using a previous snapshot to avoid transferring files that have
# already been transferred. A set of rsync filter rules are provided on stdin
# for each invocation.
#
# Usage: rsync_repository_data <host:port> [<files.rsync>] [extra rsync args...]
# If the second argument ends in ".rsync" it is used as an rsync --files-from
# list; otherwise all remaining arguments are passed straight to ghe-rsync.
# $opts, $ssh_config_file_opt and $link_dest are intentionally unquoted so
# empty values disappear and multi-word values split into separate arguments.
rsync_repository_data () {
port=$(ssh_port_part "$1")
host=$(ssh_host_part "$1")
#check if we are syncing from a given file list
if [[ "$2" == *".rsync" ]]; then
files_list="$2"
shift
shift
log_rsync "BEGIN: repositories rsync" 1>&3
ghe-rsync -avr \
-e "ssh -q $opts -p $port $ssh_config_file_opt -l $user" \
$link_dest "$@" \
--rsync-path='sudo -u git rsync' \
--include-from=- --exclude=\* \
--files-from="$files_list" \
--ignore-missing-args \
"$host:$GHE_REMOTE_DATA_USER_DIR/repositories/" \
"$backup_dir" 1>&3 2>&3
log_rsync "END: repositories rsync" 1>&3
else
shift
log_rsync "BEGIN: repositories rsync" 1>&3
ghe-rsync -avr \
-e "ssh -q $opts -p $port $ssh_config_file_opt -l $user" \
$link_dest "$@" \
--rsync-path='sudo -u git rsync' \
--include-from=- --exclude=\* \
--ignore-missing-args \
"$host:$GHE_REMOTE_DATA_USER_DIR/repositories/" \
"$backup_dir" 1>&3 2>&3
log_rsync "END: repositories rsync" 1>&3
fi
}
# Sync one node's repository data in four ordered passes: auxiliary files,
# packed-refs, loose refs/reflogs, then objects and packs. The ordering
# guarantees every referenced object is captured even while the instance is
# live (refs are copied before the objects they point to).
# $1 = hostname, $2 = <host>.rsync route file for that node.
# NOTE: the heredoc bodies below are rsync filter rules (data, not shell) and
# must not be modified.
sync_data (){
# Sync all auxiliary repository data. This includes files and directories like
# HEAD, audit_log, config, description, info/, etc. No refs or object data
# should be transferred here.
echo 1>&3
log_info "* Transferring auxiliary files ..." 1>&3
rsync_repository_data $1:122 $2 <<RULES
- /__*__/
- /info/
+ /*/
+ /*/*.git
- /*/*.git/objects
- /*/*.git/refs
- /*/*.git/packed-refs
- /*/*.git/logs
+ /*/*.git/**
+ /*/??/
+ /*/??/??/
+ /*/??/??/??/
+ /*/??/??/??/gist/
+ /*/??/??/??/gist/*.git
- /*/??/??/??/gist/*.git/objects
- /*/??/??/??/gist/*.git/refs
- /*/??/??/??/gist/*.git/packed-refs
- /*/??/??/??/gist/*.git/logs
+ /*/??/??/??/gist/*.git/**
+ /*/nw/??/??/??/
+ /*/nw/??/??/??/*/
+ /*/nw/??/??/??/*/*.git
- /*/nw/??/??/??/*/*.git/objects
- /*/nw/??/??/??/*/*.git/refs
- /*/nw/??/??/??/*/*.git/packed-refs
- /*/nw/??/??/??/*/*.git/logs
+ /*/nw/??/??/??/*/*.git/**
RULES
# Sync packed refs files. This is performed before sync'ing loose refs since
# loose refs trump packed-refs information.
echo 1>&3
log_info "* Transferring packed-refs files ..." 1>&3
rsync_repository_data $1:122 $2 <<RULES
- /__*__/
- /info/
+ /*/
+ /*/*.git
+ /*/*.git/packed-refs
+ /*/??/
+ /*/??/??/
+ /*/??/??/??/
+ /*/??/??/??/gist/
+ /*/??/??/??/gist/*.git
+ /*/??/??/??/gist/*.git/packed-refs
+ /*/nw/??/
+ /*/nw/??/??/
+ /*/nw/??/??/??/
+ /*/nw/??/??/??/*/
+ /*/nw/??/??/??/*/*.git
+ /*/nw/??/??/??/*/*.git/packed-refs
RULES
# Sync loose refs and reflogs. This must be performed before object data is
# transferred to ensure that all referenced objects are included.
echo 1>&3
log_info "* Transferring refs and reflogs ..." 1>&3
rsync_repository_data $1:122 $2 <<RULES
- /__*__/
- /info/
+ /*/
+ /*/*.git
+ /*/*.git/refs
+ /*/*.git/refs/**
+ /*/*.git/logs
+ /*/*.git/logs/**
+ /*/??/
+ /*/??/??/
+ /*/??/??/??/
+ /*/??/??/??/gist/
+ /*/??/??/??/gist/*.git
+ /*/??/??/??/gist/*.git/refs
+ /*/??/??/??/gist/*.git/refs/**
+ /*/??/??/??/gist/*.git/logs
+ /*/??/??/??/gist/*.git/logs/**
+ /*/nw/??/
+ /*/nw/??/??/
+ /*/nw/??/??/??/
+ /*/nw/??/??/??/*/
+ /*/nw/??/??/??/*/*.git
+ /*/nw/??/??/??/*/*.git/refs
+ /*/nw/??/??/??/*/*.git/refs/**
+ /*/nw/??/??/??/*/*.git/logs
+ /*/nw/??/??/??/*/*.git/logs/**
RULES
# Sync git objects and pack files. Compression is disabled during this phase
# since these files are already well compressed.
echo 1>&3
log_info "* Transferring objects and packs ..." 1>&3
rsync_repository_data $1:122 $2 -H <<RULES
- /__*__/
- /info/
+ /*/
+ /*/*.git
+ /*/*.git/objects
- /*/*.git/objects/**/tmp_*
+ /*/*.git/objects/**
+ /*/??/
+ /*/??/??/
+ /*/??/??/??/
+ /*/??/??/??/gist/
+ /*/??/??/??/gist/*.git
+ /*/??/??/??/gist/*.git/objects
- /*/??/??/??/gist/*.git/objects/**/tmp_*
+ /*/??/??/??/gist/*.git/objects/**
+ /*/nw/??/
+ /*/nw/??/??/
+ /*/nw/??/??/??/
+ /*/nw/??/??/??/*/
+ /*/nw/??/??/??/*/*.git
+ /*/nw/??/??/??/*/*.git/objects
- /*/nw/??/??/??/*/*.git/objects/**/tmp_*
+ /*/nw/??/??/??/*/*.git/objects/**
RULES
echo 1>&3
}
# rsync all the repositories
# One background sync job per source node; cleanup() kills them on abort.
bm_start "$(basename $0) - Repo sync"
for file_list in $tempdir/*.rsync; do
hostname=$(basename $file_list .rsync)
repo_num=$(< $file_list wc -l)
ghe_verbose "* Transferring $repo_num repositories from $hostname"
sync_data $hostname $file_list &
done
# Wait for every background sync; a failing job makes `wait` return non-zero,
# which aborts the script via set -e.
for pid in $(jobs -p); do
wait $pid
done
bm_end "$(basename $0) - Repo sync"
# Since there are no routes for special data directories, we need to do this
# serially for all hostnames. Good candidate for future optimizations.
bm_start "$(basename $0) - Special Data Directories Sync"
for h in $hostnames; do
# Sync __special__ data directories, including the __alambic_assets__,
# __hookshot__, and __purgatory__ directories. The __nodeload_archives__,
# __gitmon__, and __render__ directories are excludes since they act only as
# caches.
#
# Under v2.x and greater, only the special __purgatory__ directory remains under
# /data/repositories. All other special user data directories have been moved under
# the /data/user directory.
echo 1>&3
log_info "* Transferring special data directories from $h..." 1>&3
rsync_repository_data $h:122 <<RULES
- /__nodeload_archives__/
- /__gitmon__/
- /__render__/
+ /__*__/
+ /__*__/**
+ /info/
- /info/lost+found/
+ /info/*
RULES
echo 1>&3
done
bm_end "$(basename $0) - Special Data Directories Sync"
# Compare the routes we were told to sync against what actually landed on
# disk; a diff here means some repo networks/gists were missing at the source.
if [ -z "$GHE_SKIP_ROUTE_VERIFICATION" ]; then
bm_start "$(basename $0) - Verifying Routes"
for file_lst in $tempdir/*.rsync; do
< $file_lst sort | uniq
done |sort|uniq > $tempdir/source_routes
(cd $backup_dir/ && find * -mindepth 5 -maxdepth 6 -type d -name \*.git | fix_paths_for_ghe_version | uniq | sort | uniq) > $tempdir/destination_routes
git --no-pager diff --unified=0 --no-prefix -- $tempdir/source_routes $tempdir/destination_routes || echo "Warning: One or more repository networks and/or gists were not found on the source appliance."
increment-progress-total-count 1
bm_end "$(basename $0) - Verifying Routes"
fi
bm_end "$(basename $0)"

Просмотреть файл

@ -1,186 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-secrets <host>
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-backup command.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Grab the host
host="$GHE_HOSTNAME"
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$host"
# Function to backup a secret setting to a file.
# backup-secret <description> <file-name> <setting-name> [--best-effort]
# Reads the ghe-config value <setting-name> from the appliance and writes it
# to <file-name> in the current directory (the snapshot dir). The value is
# first written to "<file-name>+" and only renamed into place when non-empty,
# so an unset secret leaves no file behind. --best-effort may appear anywhere
# among the arguments.
backup-secret() {
best_effort=false
description=""
file=""
setting=""
count=0
# Positional args are assigned by position (0=description, 1=file,
# 2=setting); --best-effort is recognized wherever it appears.
while [ $# -gt 0 ]; do
case "$1" in
--best-effort)
shift 1
best_effort=true
;;
*)
case $count in
0)
description=$1
;;
1)
file=$1
;;
2)
setting=$1
;;
*)
>&2 echo "Too many arguments"
;;
esac
count=$((count+1))
shift 1
esac
done
log_info "* Transferring $description ..." 1>&3
# The `|| ( ... )` subshell swallows a ghe-config failure so set -e doesn't
# abort the whole secrets backup. NOTE(review): the "not set" message is
# printed only when --best-effort is NOT given — looks inverted, but it is
# reproduced here as-is; confirm intent before changing.
ghe-ssh "$host" -- ghe-config "$setting" > "$file+" || (
if [ "$best_effort" = "false" ]; then
echo "Info: $description not set. Skipping..." >&2
fi
)
# Keep the file only if the secret produced a non-empty value.
if [ -n "$(cat "$file+")" ]; then
mv "$file+" "$file"
else
unlink "$file+"
fi
}
bm_start "$(basename $0)"
# Create the snapshot directory if needed and change into it.
mkdir -p "$GHE_SNAPSHOT_DIR"
cd "$GHE_SNAPSHOT_DIR"
log_info "* Transferring secrets data ..." 1>&3
backup-secret "management console password" "manage-password" "secrets.manage"
backup-secret "password pepper" "password-pepper" "secrets.github.user-password-secrets"
backup-secret "kredz.credz HMAC key" "kredz-credz-hmac" "secrets.kredz.credz-hmac-secret"
backup-secret "kredz.varz HMAC key" "kredz-varz-hmac" "secrets.kredz.varz-hmac-secret"
# backup encryption keying material and create backup value current encryption for GHES 3.7.0 onwards
# this is for forwards compatibility with GHES 3.8.0 onwards
# NOTE(review): backup-secret appears to take <log label> <snapshot file name>
# <remote config setting> — helper is defined elsewhere in backup-utils; confirm there.
if [ "$(version $GHE_REMOTE_VERSION)" -ge "$(version 3.7.0)" ]; then
backup-secret "encrypted column encryption keying material" "encrypted-column-encryption-keying-material" "secrets.github.encrypted-column-keying-material"
# Derive the current encryption key from the keying material: the sed
# expression strips everything up to and including the last ';'.
cat "$GHE_SNAPSHOT_DIR/encrypted-column-encryption-keying-material" | sed 's:.*;::' > "$GHE_SNAPSHOT_DIR/encrypted-column-current-encryption-key"
fi
# Secret scanning storage and shared-transit keys exist from GHES 3.8.0 onwards.
if [ "$(version $GHE_REMOTE_VERSION)" -ge "$(version 3.8.0)" ]; then
backup-secret "secret scanning encrypted secrets current storage key" "secret-scanning-encrypted-secrets-current-storage-key" "secrets.secret-scanning.encrypted-secrets-current-storage-key"
backup-secret "secret scanning encrypted secrets delimited storage keys" "secret-scanning-encrypted-secrets-delimited-storage-keys" "secrets.secret-scanning.encrypted-secrets-delimited-storage-keys"
backup-secret "secret scanning encrypted secrets current shared transit key" "secret-scanning-encrypted-secrets-current-shared-transit-key" "secrets.secret-scanning.encrypted-secrets-current-shared-transit-key"
backup-secret "secret scanning encrypted secrets delimited shared transit keys" "secret-scanning-encrypted-secrets-delimited-shared-transit-keys" "secrets.secret-scanning.encrypted-secrets-delimited-shared-transit-keys"
fi
# User-content encryption root keys exist from GHES 3.11.0 onwards.
if [ "$(version $GHE_REMOTE_VERSION)" -ge "$(version 3.11.0)" ]; then
backup-secret "secret scanning encrypted content keys" "secret-scanning-user-content-delimited-encryption-root-keys" "secrets.secret-scanning.secret-scanning-user-content-delimited-encryption-root-keys"
fi
# Backup argon secrets for multiuser from ghes version 3.8 onwards
# (only for the 3.8.0 / 3.8.1 window, hence the -lt 3.8.2 upper bound).
if [[ "$(version $GHE_REMOTE_VERSION)" -ge "$(version 3.8.0)" && "$(version $GHE_REMOTE_VERSION)" -lt "$(version 3.8.2)" ]]; then
backup-secret "management console argon2 secret" "manage-argon-secret" "secrets.manage-auth.argon-secret"
fi
# Backup external MySQL password if running external MySQL DB.
if is_service_external 'mysql'; then
backup-secret "external MySQL password" "external-mysql-password" "secrets.external.mysql"
fi
# Backup Actions settings.
# Only runs when GitHub Actions is enabled on the appliance. Secrets marked
# --best-effort presumably tolerate being absent without failing the backup
# — confirm against backup-secret's definition.
if ghe-ssh "$host" -- ghe-config --true app.actions.enabled; then
backup-secret "Actions configuration database login" "actions-config-db-login" "secrets.actions.ConfigurationDatabaseSqlLogin"
backup-secret "Actions configuration database password" "actions-config-db-password" "secrets.actions.ConfigurationDatabaseSqlPassword"
backup-secret "Actions framework access token key secret" "actions-framework-access-token" "secrets.actions.FrameworkAccessTokenKeySecret" --best-effort
backup-secret "Actions Url signing HMAC key primary" "actions-url-signing-hmac-key-primary" "secrets.actions.UrlSigningHmacKeyPrimary"
backup-secret "Actions Url signing HMAC key secondary" "actions-url-signing-hmac-key-secondary" "secrets.actions.UrlSigningHmacKeySecondary"
backup-secret "Actions OAuth S2S signing cert" "actions-oauth-s2s-signing-cert" "secrets.actions.OAuthS2SSigningCert"
backup-secret "Actions OAuth S2S signing key" "actions-oauth-s2s-signing-key" "secrets.actions.OAuthS2SSigningKey"
backup-secret "Actions OAuth S2S signing cert thumbprint" "actions-oauth-s2s-signing-cert-thumbprint" "secrets.actions.OAuthS2SSigningCertThumbprint"
backup-secret "Actions primary encryption cert thumbprint" "actions-primary-encryption-cert-thumbprint" "secrets.actions.PrimaryEncryptionCertificateThumbprint"
backup-secret "Actions AAD cert thumbprint" "actions-aad-cert-thumbprint" "secrets.actions.AADCertThumbprint" --best-effort
backup-secret "Actions delegated auth cert thumbprint" "actions-delegated-auth-cert-thumbprint" "secrets.actions.DelegatedAuthCertThumbprint" --best-effort
backup-secret "Actions runtime service principal cert" "actions-runtime-service-principal-cert" "secrets.actions.RuntimeServicePrincipalCertificate" --best-effort
backup-secret "Actions S2S encryption cert" "actions-s2s-encryption-cert" "secrets.actions.S2SEncryptionCertificate"
backup-secret "Actions secondary encryption cert thumbprint" "actions-secondary-encryption-cert-thumbprint" "secrets.actions.SecondaryEncryptionCertificateThumbprint"
backup-secret "Actions service principal cert" "actions-service-principal-cert" "secrets.actions.ServicePrincipalCertificate" --best-effort
backup-secret "Actions SPS validation cert thumbprint" "actions-sps-validation-cert-thumbprint" "secrets.actions.SpsValidationCertThumbprint"
backup-secret "Actions storage container prefix" "actions-storage-container-prefix" "secrets.actions.storage.container-prefix"
# Actions Launch service secrets (secrets.launch.* namespace).
backup-secret "Actions Launch secrets encryption/decryption" "actions-launch-secrets-private-key" "secrets.launch.actions-secrets-private-key"
backup-secret "Actions Launch deployer HMAC key" "actions-launch-deployer-hmac" "secrets.launch.deployer-hmac-secret"
backup-secret "Actions Launch Client id" "actions-launch-client-id" "secrets.launch.client-id"
backup-secret "Actions Launch Client secret" "actions-launch-client-secret" "secrets.launch.client-secret"
backup-secret "Actions Launch receiver webhook secret" "actions-launch-receiver-webhook-secret" "secrets.launch.receiver-webhook-secret"
backup-secret "Actions Launch app private key" "actions-launch-app-private-key" "secrets.launch.app-private-key"
backup-secret "Actions Launch app public key" "actions-launch-app-public-key" "secrets.launch.app-public-key"
backup-secret "Actions Launch app id" "actions-launch-app-id" "secrets.launch.app-id"
backup-secret "Actions Launch app relay id" "actions-launch-app-relay-id" "secrets.launch.app-relay-id"
backup-secret "Actions Launch action runner secret" "actions-launch-action-runner-secret" "secrets.launch.action-runner-secret"
backup-secret "Actions Launch service cert" "actions-launch-azp-app-cert" "secrets.launch.azp-app-cert"
# NOTE(review): snapshot file name "actions-launch-app-app-private-key" does
# not mirror the config key "azp-app-private-key"; kept as-is because the
# restore side must look up the exact same file name.
backup-secret "Actions Launch service private key" "actions-launch-app-app-private-key" "secrets.launch.azp-app-private-key"
fi
# Backup Packages registry settings when the Packages feature is enabled.
if ghe-ssh "$host" -- ghe-config --true app.packages.enabled; then
backup-secret "Packages aws access key" "packages-aws-access-key" "secrets.packages.aws-access-key"
backup-secret "Packages aws secret key" "packages-aws-secret-key" "secrets.packages.aws-secret-key"
backup-secret "Packages s3 bucket" "packages-s3-bucket" "secrets.packages.s3-bucket"
backup-secret "Packages storage service url" "packages-service-url" "secrets.packages.service-url"
backup-secret "Packages blob storage type" "packages-blob-storage-type" "secrets.packages.blob-storage-type"
backup-secret "Packages azure connection string" "packages-azure-connection-string" "secrets.packages.azure-connection-string"
backup-secret "Packages azure container name" "packages-azure-container-name" "secrets.packages.azure-container-name"
fi
# Backup Chat Integration settings
# Only runs when ChatOps is enabled on the appliance. Each backup-secret call
# takes <log label> <snapshot file name> <remote config setting>; the labels
# are display-only.
if ghe-ssh "$host" -- ghe-config --true app.chatops.enabled; then
backup-secret "Chat Integration MSTeams app id" "chatops-msteams-app-id" "secrets.chatops.msteams.app-id"
backup-secret "Chat Integration MSTeams app password" "chatops-msteams-app-password" "secrets.chatops.msteams.app-password"
backup-secret "Chat Integration MSTeams public endpoint" "chatops-msteams-app-public-endpoint" "secrets.chatops.msteams.public-endpoint"
backup-secret "Chat Integration MSTeams bot handle" "chatops-msteams-bot-handle" "secrets.chatops.msteams.bot-handle"
backup-secret "Chat Integration MSTeams bot name" "chatops-msteams-bot-name" "secrets.chatops.msteams.bot-name"
backup-secret "Chat Integration Slack app id" "chatops-slack-app-id" "secrets.chatops.slack.app-id"
backup-secret "Chat Integration Slack client id" "chatops-slack-client-id" "secrets.chatops.slack.client-id"
backup-secret "Chat Integration Slack client secret" "chatops-slack-client-secret" "secrets.chatops.slack.client-secret"
backup-secret "Chat Integration Slack verification token" "chatops-slack-verification-token" "secrets.chatops.slack.verification-token"
backup-secret "Chat Integration Slack config token" "chatops-slack-config-token" "secrets.chatops.slack.config-token"
backup-secret "Chat Integration Slack public endpoint" "chatops-slack-public-endpoint" "secrets.chatops.slack.public-endpoint"
backup-secret "Chat Integration Slack signing secret" "chatops-slack-signing-secret" "secrets.chatops.slack.signing-secret"
backup-secret "Chat Integration Slack app level token" "chatops-slack-app-level-token" "secrets.chatops.slack.app-level-token"
backup-secret "Chat Integration Slack slack command" "chatops-slack-slash-command" "secrets.chatops.slack.slash-command"
# NOTE: the next two snapshot file names use a dot ("chatops-slack.app-name")
# rather than a hyphen; kept as-is because the restore side must look up the
# exact same file names.
backup-secret "Chat Integration Slack app name" "chatops-slack.app-name" "secrets.chatops.slack.app-name"
backup-secret "Chat Integration Slack socket mode" "chatops-slack.socket-mode" "secrets.chatops.slack.socket-mode"
backup-secret "Chat Integration public endpoint" "chatops-public-endpoint" "secrets.chatops.public-endpoint"
backup-secret "Chat Integration app type" "chatops-app-type" "secrets.chatops.app-type"
backup-secret "Chat Integration app id teams" "chatops-app-id-teams" "secrets.chatops.app-id-teams"
backup-secret "Chat Integration webhook secret teams" "chatops-webhook-secret-teams" "secrets.chatops.webhook-secret-teams"
backup-secret "Chat Integration client secret teams" "chatops-client-secret-teams" "secrets.chatops.client-secret-teams"
# Fixed typo in the display label ("clien" -> "client"); file name and config
# key are unchanged.
backup-secret "Chat Integration client id teams" "chatops-client-id-teams" "secrets.chatops.client-id-teams"
backup-secret "Chat Integration storage secret" "chatops-storage-secret" "secrets.chatops.storage-secret"
backup-secret "Chat Integration session secret" "chatops-session-secret" "secrets.chatops.session-secret"
backup-secret "Chat Integration app id slack" "chatops-app-id-slack" "secrets.chatops.app-id-slack"
backup-secret "Chat Integration webhook secret slack" "chatops-webhook-secret-slack" "secrets.chatops.webhook-secret-slack"
backup-secret "Chat Integration client secret slack" "chatops-client-secret-slack" "secrets.chatops.client-secret-slack"
backup-secret "Chat Integration client id slack" "chatops-client-id-slack" "secrets.chatops.client-id-slack"
fi
bm_end "$(basename $0)"
exit 0

Просмотреть файл

@ -1,50 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-settings
#/ Backup settings from a snapshot to the given <host>.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Grab the host FIRST: the host-check below takes it as an argument.
# (Previously $host was referenced before it was assigned, so the version
# check ran against an empty hostname.)
host="$GHE_HOSTNAME"

# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$host"

bm_start "$(basename $0)"

# Create the snapshot directory if needed and change into it.
mkdir -p "$GHE_SNAPSHOT_DIR"
cd "$GHE_SNAPSHOT_DIR"

log_info "* Transferring settings data ..." 1>&3
ghe-ssh "$host" -- 'ghe-export-settings' > settings.json

log_info "* Transferring license data ..." 1>&3
ghe-ssh "$host" -- "sudo cat '$GHE_REMOTE_LICENSE_FILE'" > enterprise.ghl

# SAML keys are only present when SAML authentication has been configured.
if ghe-ssh "$host" -- "test -f $GHE_REMOTE_DATA_USER_DIR/common/idp.crt"; then
  log_info "* Transferring SAML keys ..." 1>&3
  ghe-ssh $host -- sudo tar -C $GHE_REMOTE_DATA_USER_DIR/common/ -cf - "idp.crt saml-sp.p12" > saml-keys.tar
fi

# Custom CA certificates, when the appliance supports exporting them.
if ghe-ssh "$host" -- "which ghe-export-ssl-ca-certificates 1>/dev/null"; then
  log_info "* Transferring CA certificates ..." 1>&3
  ghe-ssh "$host" -- "ghe-export-ssl-ca-certificates" > ssl-ca-certificates.tar
fi

if [ "$GHE_BACKUP_STRATEGY" = "cluster" ]; then
  # cluster.conf is required to restore a cluster; fail early when absent.
  log_info "* Transferring cluster configuration ..." 1>&3
  if ! ghe-ssh "$host" -- "sudo cat $GHE_REMOTE_CLUSTER_CONF_FILE 2>/dev/null" > cluster.conf; then
    log_error "Error: Enterprise Cluster is not configured yet, backup will fail" >&2
    exit 1
  fi
else
  # Single node / HA: record the node UUID when available (best effort).
  if ghe-ssh "$host" -- "sudo cat $GHE_REMOTE_DATA_USER_DIR/common/uuid 2>/dev/null" > uuid; then
    log_info "* Transferring UUID ..." 1>&3
  fi
fi

bm_end "$(basename $0)"

Просмотреть файл

@ -1,179 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-storage
#/ Take an online, incremental snapshot of all Alambic Storage data using the
#/ calculated routes method.
#/
#/ Note: This command typically isn't called directly. It's invoked by
#/ ghe-backup.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
bm_start "$(basename $0)"
# Set up remote host and root backup snapshot directory based on config
host="$GHE_HOSTNAME"
backup_dir="$GHE_SNAPSHOT_DIR/storage"
# Verify rsync is available.
if ! rsync --version 1>/dev/null 2>&1; then
log_error "rsync not found." 1>&2
exit 1
fi
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$host"
# Split host:port into parts
port=$(ssh_port_part "$GHE_HOSTNAME")
host=$(ssh_host_part "$GHE_HOSTNAME")
# Add user / -l option
# Strip a trailing "@host" to get the user; when no user@ prefix was given,
# ${host%@*} equals $host and we fall back to "admin".
user="${host%@*}"
[ "$user" = "$host" ] && user="admin"
hostnames=$host
ssh_config_file_opt=
# Local and remote scratch directories; both are removed by cleanup() below.
tempdir=$(mktemp -d -t backup-utils-backup-XXXXXX)
remote_tempdir=$(ghe-ssh "$GHE_HOSTNAME" -- mktemp -d -t backup-utils-backup-XXXXXX)
routes_list=$tempdir/routes_list
remote_routes_list=$remote_tempdir/remote_routes_list
opts="$GHE_EXTRA_SSH_OPTS"
# storage server hostnames under cluster
# In cluster mode, enumerate storage-server nodes and generate a dedicated
# ssh_config so each node can be reached by its "storage-server-<uuid>" name.
if [ "$GHE_BACKUP_STRATEGY" = "cluster" ]; then
ssh_config_file="$tempdir/ssh_config"
ssh_config_file_opt="-F $ssh_config_file"
opts="$opts -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no"
hostnames=$(ghe-cluster-find-nodes "$GHE_HOSTNAME" "storage-server")
ghe-ssh-config "$GHE_HOSTNAME" "$hostnames" > "$ssh_config_file"
fi
# Replica hostnames for HA
if ghe-ssh "$GHE_HOSTNAME" -- "[ -f '$GHE_REMOTE_ROOT_DIR/etc/github/repl-state' ]"; then
ha_replica_hosts=$(ghe-ssh "$GHE_HOSTNAME" ghe-cluster-nodes --replica)
fi
# Make sure root backup dir exists if this is the first run
mkdir -p "$backup_dir"
# Removes the remote sync-in-progress file on exit, re-enabling GC operations
# on the remote instance.
cleanup() {
# Enable remote maintenance operations
for hostname in $hostnames; do
ghe-gc-enable $ssh_config_file_opt $hostname:$port || {
log_warn "Re-enable gc on $hostname failed, please manually delete $SYNC_IN_PROGRESS_FILE" 1>&2
}
done
# Enable remote GC operations for HA replica
# Replicas are reached by hopping through the primary, since they may not
# be directly reachable from the backup host.
for replica_host in $ha_replica_hosts; do
echo "set -o pipefail; ssh $replica_host -- 'sudo rm -f $SYNC_IN_PROGRESS_FILE'" | ghe-ssh "$host" /bin/bash || {
echo "Re-enable gc on $replica_host failed, please manually delete $SYNC_IN_PROGRESS_FILE" 1>&2
}
done
# Drop both scratch directories (remote first, then local).
ghe-ssh "$GHE_HOSTNAME" -- rm -rf $remote_tempdir
rm -rf $tempdir
}
trap 'cleanup' EXIT INT
# Disable remote maintenance operations
for hostname in $hostnames; do
ghe-gc-disable $ssh_config_file_opt $hostname:$port
done
# Disable remote GC operations for HA replica
# gc_disable is a function defined in ghe-backup-config
# gc_disable is called on the replica node via the primary node, because replica node is not expected to be reachable from backup host. But replica node is expected to be reachable from primary node.
for replica_host in $ha_replica_hosts; do
echo "set -o pipefail; ssh $replica_host -- '$(declare -f gc_disable); gc_disable \"$SYNC_IN_PROGRESS_FILE\" \"$GHE_GIT_COOLDOWN_PERIOD\"'" | ghe-ssh "$host" /bin/bash || {
echo "Disable gc on $replica_host failed" 1>&2
}
done
# If we have a previous increment and it is not empty, avoid transferring existing files via rsync's
# --link-dest support. This also decreases physical space usage considerably.
if [ -d "$GHE_DATA_DIR/current/storage" ] && [ "$(ls -A $GHE_DATA_DIR/current/storage)" ]; then
link_dest="--link-dest=../../current/storage"
fi
# Calculate sync routes. This will store the healthy object paths for each node
#
# This gets a repo path and stores the path in the $node.sync file
#   a/nw/a8/3f/02/100000855 storage-server-node1 >> storage-server-node1.sync
#   a/nw/a8/bc/8d/100000880 storage-server-node3 >> storage-server-node3.sync
#   a/nw/a5/06/81/100000659 storage-server-node2 >> storage-server-node2.sync
# ...
#one route per line.
#
# NOTE: The route generation is performed on the appliance as it is considerably
# more performant than performing over an SSH pipe.
#
bm_start "$(basename $0) - Generating routes"
echo "github-env ./bin/storage-cluster-backup-routes > $remote_routes_list" | ghe-ssh "$GHE_HOSTNAME" -- /bin/bash
ghe-ssh "$GHE_HOSTNAME" -- cat $remote_routes_list | ghe_debug
bm_end "$(basename $0) - Generating routes"
# Pull the routes file over compressed to cut transfer time.
bm_start "$(basename $0) - Fetching routes"
ghe-ssh "$GHE_HOSTNAME" -- gzip -c $remote_routes_list | gzip -d > $routes_list
cat $routes_list | ghe_debug
bm_end "$(basename $0) - Fetching routes"
# Split the route list into one "<node>.rsync" file-list per source host.
# Outside cluster mode every route is forced onto the single $host.
bm_start "$(basename $0) - Processing routes"
if [ "$GHE_BACKUP_STRATEGY" != "cluster" ]; then
server=$host
fi
cat $routes_list | awk -v tempdir="$tempdir" -v server="$server" '{ for(i=2;i<=NF;i++){ server != "" ? host=server : host=$i; print $1 > (tempdir"/"host".rsync") }}'
ghe_debug "\n$(find "$tempdir" -maxdepth 1 -name '*.rsync')"
bm_end "$(basename $0) - Processing routes"
if [ -z "$(find "$tempdir" -maxdepth 1 -name '*.rsync')" ]; then
log_warn "no routes found, skipping storage backup ..."
exit 0
else
increment-progress-total-count 2
fi
# rsync all the storage objects
# One background rsync per source host; transfers run in parallel and are
# joined by the wait loop below.
bm_start "$(basename $0) - Storage object sync"
for file_list in $tempdir/*.rsync; do
hostname=$(basename $file_list .rsync)
# Use the owner of /data/user/storage as the remote rsync user, falling
# back to "git" when the stat fails.
storage_user=$(ghe-ssh $ssh_config_file_opt $hostname:$port -- stat -c %U /data/user/storage || echo git)
object_num=$(cat $file_list | wc -l)
ghe_verbose "* Transferring $object_num objects from $hostname"
log_rsync "BEGIN: storage rsync" 1>&3
# NOTE(review): the END line below is logged immediately after the rsync is
# backgrounded, i.e. while the transfer may still be running.
ghe-rsync -avr \
-e "ssh -q $opts -p $port $ssh_config_file_opt -l $user" \
$link_dest "$@" \
--rsync-path="sudo -u $storage_user rsync" \
--files-from="$file_list" \
--ignore-missing-args \
--size-only \
"$hostname:$GHE_REMOTE_DATA_USER_DIR/storage/" \
"$backup_dir" 1>&3 &
log_rsync "END: storage rsync" 1>&3
done
for pid in $(jobs -p); do
wait $pid
done
bm_end "$(basename $0) - Storage object sync"
# Compare the object paths we intended to copy against what landed on disk;
# a diff indicates objects missing from the source appliance.
if [ -z "$GHE_SKIP_ROUTE_VERIFICATION" ]; then
bm_start "$(basename $0) - Verifying Routes"
cat $tempdir/*.rsync | uniq | sort | uniq > $tempdir/source_routes
(cd $backup_dir/ && find * -mindepth 3 -maxdepth 3 -type f -print | uniq | sort | uniq) > $tempdir/destination_routes
git --no-pager diff --unified=0 --no-prefix -- $tempdir/source_routes $tempdir/destination_routes || echo "Warning: One or more storage objects were not found on the source appliance."
increment-progress-total-count 1
bm_end "$(basename $0) - Verifying Routes"
fi
bm_end "$(basename $0)"

Просмотреть файл

@ -1,23 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-store-version
#/ Stores information about the used version of backup-utils on <host>
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

bm_start "$(basename $0)"

# Start from the packaged backup-utils version string.
recorded_version="$BACKUP_UTILS_VERSION"

# When running from a git checkout, append the exact commit SHA so the
# recorded version pinpoints the tree that produced this backup.
if [ -d $GHE_BACKUP_ROOT/.git ]; then
  commit_sha=$(git --git-dir=$GHE_BACKUP_ROOT/.git rev-parse HEAD || true)
  [ -z "$commit_sha" ] || recorded_version="$recorded_version:$commit_sha"
fi

# Record the version string on the appliance; dd runs under sudo and its
# output is discarded.
echo "$recorded_version" |
ghe-ssh "$GHE_HOSTNAME" -- "sudo dd of=$GHE_REMOTE_DATA_USER_DIR/common/backup-utils-version >/dev/null 2>&1"

bm_end "$(basename $0)"

Просмотреть файл

@ -1,20 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-strategy
#/
#/ Determine the backup strategy that will be used.
#/
#/ The rsync strategy should be used for single VMs and all HA configurations.
#/
#/ The cluster strategy should be used to backup GHE clusters.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Default to the rsync strategy (single VM / HA). A host that carries a
# cluster config file but no replication state file is a cluster node,
# which requires the cluster strategy instead.
strategy="rsync"
if ghe-ssh "$GHE_HOSTNAME" -- \
  "[ -f '$GHE_REMOTE_ROOT_DIR/etc/github/cluster' ] && [ ! -f '$GHE_REMOTE_ROOT_DIR/etc/github/repl-state' ]"; then
  strategy="cluster"
fi
echo "$strategy"

Просмотреть файл

@ -1,64 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-backup-userdata <dirname>
#/ Take an online, incremental snapshot of a user data directory. This is used
#/ for a number of different simple datastores kept under /data/user on the
#/ remote appliance, including: hookshot, alambic_assets, and pages data.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
bm_start "$(basename $0) - $1"
# Verify rsync is available.
if ! rsync --version 1>/dev/null 2>&1; then
log_error "rsync not found." 1>&2
exit 1
fi
# Grab the host and /data/user directory name.
host="$GHE_HOSTNAME"
dirname="$1"
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$host"
# Verify that the user data directory exists. Bail out if not, which may be due
# to an older version of GHE or no data has been added to this directory yet.
ghe-ssh "$host" -- "sudo -u git [ -d '$GHE_REMOTE_DATA_USER_DIR/$dirname' ]" || exit 0
# If we have a previous increment and it is not empty, avoid transferring existing files via rsync's
# --link-dest support. This also decreases physical space usage considerably.
if [ -d "$GHE_DATA_DIR/current/$dirname" ] && [ "$(ls -A $GHE_DATA_DIR/current/$dirname)" ]; then
# $dirname may be nested (e.g. "a/b/c"). --link-dest is resolved relative to
# the rsync destination ($GHE_SNAPSHOT_DIR/$dirname), so build a chain of
# "../" with one extra level per path component to climb back out before
# descending into current/$dirname.
subdir=$dirname
link_path=".."
while true; do
# Stop once the component has been reduced to a bare name ("." or "/"
# parent means there are no more levels to strip).
if [ "$(dirname $subdir)" = "." ]; then
break
fi
if [ "$(dirname $subdir)" = "/" ]; then
break
fi
link_path="../$link_path"
subdir=$(dirname $subdir)
done
link_dest="--link-dest=../${link_path}/current/$dirname"
fi
# Ensure target directory exists, is needed with subdirectories
mkdir -p "$GHE_SNAPSHOT_DIR/$dirname"
log_rsync "BEGIN: userdata rsync" 1>&3
# Transfer all data from the user data directory using rsync.
# Runs as the git user on the appliance; $link_dest is intentionally
# unquoted so it disappears entirely when no previous increment exists.
ghe-rsync -av \
-e "ghe-ssh -p $(ssh_port_part "$host")" \
--rsync-path='sudo -u git rsync' \
$link_dest \
"$(ssh_host_part "$host"):$GHE_REMOTE_DATA_USER_DIR/$dirname/" \
"$GHE_SNAPSHOT_DIR/$dirname" 1>&3
log_rsync "END: userdata rsync" 1>&3
bm_end "$(basename $0) - $1"

Просмотреть файл

@ -1,41 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-cluster-find-nodes <host> <prefix>
#/
#/ Finds all nodes of the cluster using the config on <host>.
#/ If it is a 2.8 and later cluster version the results are returned as
#/ prefix-uuid, otherwise the configured hostnames are returned.
#/ Also filters nodes based on the prefix role.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-backup-* and ghe-restore-* commands in cluster environments.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Check if the REMOTE DATA USER directory is set.
# The expansion is quoted: unquoted, an empty value made this `[ -z ]`
# (always true by accident) and a multi-word value broke the test entirely.
if [ -z "$GHE_REMOTE_DATA_USER_DIR" ]; then
  log_error "Env variable GHE_REMOTE_DATA_USER_DIR is not set. Exiting"
  exit 1
fi

# Show usage and bail with no arguments
[ -z "$*" ] && print_usage

GHE_HOSTNAME="$1"
prefix="$2"
# The role is the first dash-separated token of the prefix
# (e.g. "storage-server" -> "storage").
role=$(echo "$prefix" | cut -d '-' -f1)

# Cluster: ask the appliance for the UUID of every node carrying the role
# and emit "<prefix>-<uuid>" names. Otherwise emit the single node's name.
if ghe-ssh "$GHE_HOSTNAME" test -f $GHE_REMOTE_ROOT_DIR/etc/github/cluster; then
  node_uuids=$(ghe-ssh "$GHE_HOSTNAME" ghe-cluster-nodes -r "$role" -u | cut -f 2)
  hostnames=''
  for uuid in $node_uuids; do
    hostnames+="$prefix-$uuid "
  done
else
  uuid=$(ghe-ssh "$GHE_HOSTNAME" cat $GHE_REMOTE_DATA_USER_DIR/common/uuid)
  hostnames="$prefix-$uuid"
fi
echo "$hostnames"

Просмотреть файл

@ -1,140 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-detect-leaked-ssh-key [-s <snapshot-id>]
#/
#/ This utility will check each snapshot's existing SSH host keys against the list
#/ of known leaked SSH host keys from GitHub Enterprise packages.
#/
#/ OPTIONS:
#/ -h | --help Show this message.
#/ -s |--snapshot <snapshot-id> Scan the snapshot with the given id.
#/ Available snapshots may be listed under the data directory.
#/
set -e
# Print the #/ usage header above and exit.
usage() {
grep '^#/' < "$0" | cut -c 4-
exit 2
}
# Scratch directory for extracting key files from the snapshot tars.
TEMPDIR=$(mktemp -d)
while [ $# -gt 0 ]; do
case "$1" in
-h|--help)
usage
;;
-s|--snapshot)
snapshot=$2
shift
;;
*)
usage
;;
esac
shift
done
# Identify the parent process's script so the messaging below can be
# tailored to a ghe-backup or ghe-restore invocation.
ppid_script=$(ps -o args= $PPID 2>/dev/null | awk '{print $2}')
if [ -n "$ppid_script" ]; then
ppid_name=$(basename $ppid_script)
fi
# Newer OpenSSH versions support `ssh-keygen -E <hash>`; detect that so MD5
# fingerprints (the format used by the blacklist) can be requested explicitly.
sshkeygen_multiple_hash_formats=false
if (ssh-keygen -E 2>&1 | head -1 | grep -q 'option requires an argument'); then
sshkeygen_multiple_hash_formats=true
fi
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Known-leaked fingerprints; overridable via the environment for testing.
FINGERPRINT_BLACKLIST="${FINGERPRINT_BLACKLIST:-$(cat "$GHE_BACKUP_ROOT/share/github-backup-utils/ghe-ssh-leaked-host-keys-list.txt")}"
# Host key file names to look for inside each ssh-host-keys.tar.
keys="ssh_host_dsa_key.pub ssh_host_ecdsa_key.pub ssh_host_ed25519_key.pub ssh_host_rsa_key.pub"
# Get all the host ssh keys tar from all snapshots directories
if [ -n "$snapshot" ]; then
if [ ! -d "$snapshot" ]; then
echo "Invalid snapshot directory: $snapshot" >&2
exit 1
fi
ssh_tars=$(find "$snapshot" -maxdepth 1 -type f -iname 'ssh-host-keys.tar')
else
ssh_tars=$(find "$GHE_DATA_DIR" -maxdepth 2 -type f -iname 'ssh-host-keys.tar')
fi
# Store the current backup snapshot folder
if [ -L "$GHE_DATA_DIR/current" ]; then
current_dir=$(cd "$GHE_DATA_DIR/current"; pwd -P)
fi
leaked_keys_found=false
leaked_keys_skippedcheck=false
current_bkup=false
# Extract each candidate key from each tar, compute its MD5 fingerprint and
# compare it against the blacklist.
for tar_file in $ssh_tars; do
for key in $keys; do
if tar -tvf "$tar_file" $key &>/dev/null; then
tar -C $TEMPDIR -xvf "$tar_file" $key &>/dev/null
if $sshkeygen_multiple_hash_formats; then
fingerprint=$(ssh-keygen -l -E md5 -f $TEMPDIR/$key | cut -d ' ' -f 2 | cut -f2- -d':')
else
fingerprint=$(ssh-keygen -lf $TEMPDIR/$key | cut -d ' ' -f 2)
fi
if [ -z "$fingerprint" ]; then
# Could not fingerprint this key; remember that the check was incomplete.
leaked_keys_skippedcheck=true
elif echo "$FINGERPRINT_BLACKLIST" | grep -q "$fingerprint"; then
leaked_keys_found=true
if [ "$current_dir" == "$(dirname "$tar_file")" ]; then
current_bkup=true
log_warn "* Leaked key found in current backup snapshot."
else
log_warn "* Leaked key found in backup snapshot."
fi
echo "* Snapshot file: $tar_file"
echo "* Key file: $key"
echo "* Key: $fingerprint"
echo
fi
fi
done
done
# Report: the advice differs depending on whether we were invoked from
# ghe-restore, ghe-backup, or standalone.
if $leaked_keys_found; then
if echo "$ppid_name" | grep -q 'ghe-restore'; then
echo
echo "* The snapshot that is being restored contains a leaked SSH host key."
echo "* We recommend rolling the SSH host keys after completing the restore."
echo "* Roll the keys either manually or with ghe-ssh-roll-host-keys on the appliance."
echo "* (An upgrade may be required)"
echo
elif echo "$ppid_name" | grep -q 'ghe-backup'; then
echo "* The current backup contains leaked SSH host keys."
echo "* We strongly recommend rolling your SSH host keys and making a new backup."
echo "* Roll the keys either manually or with ghe-ssh-roll-host-keys on the appliance."
echo "* (An upgrade may be required)"
else
if $current_bkup; then
echo "* The current backup contains leaked SSH host keys."
echo "* Current backup directory: $current_dir"
echo "* We strongly recommend rolling your SSH host keys and making a new backup."
echo "* Roll the keys either manually or with ghe-ssh-roll-host-keys on the appliance."
echo "* (An upgrade may be required)"
fi
echo
echo "* One or more older backup snapshots contain leaked SSH host keys."
echo "* No immediate action is needed but when you use one of these older snapshots for a restore, "
echo "* please make sure to roll the SSH host keys after restore."
echo "* Roll the keys either manually or with ghe-ssh-roll-host-keys on the appliance."
echo "* (An upgrade may be required)"
echo
fi
else
if $leaked_keys_skippedcheck; then
log_info "* No result - check not performed since host key fingerprint was empty"
else
log_info "* No leaked keys found"
fi
fi
# Cleanup temp dir
rm -rf $TEMPDIR

Просмотреть файл

@ -1,12 +0,0 @@
#!/usr/bin/env bash
# Docker entrypoint: puts the backup-utils commands on PATH, materializes a
# backup.config from GHE_* environment variables, then exec's the requested
# command.
set -e
PATH=$PATH:/backup-utils/bin
mkdir -p /etc/github-backup-utils
touch /etc/github-backup-utils/backup.config
# Append every GHE_* environment variable as a KEY="VALUE" line of the config
# file. NOTE(review): values containing a double quote would produce broken
# config lines — confirm inputs are quote-free.
env | grep ^GHE_ | sed -r "s/(.[^=]+)=(.*)/\1=\"\2\"/g" >> /etc/github-backup-utils/backup.config
exec "$@"

Просмотреть файл

@ -1,42 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-gc-disable [<option>...] <host>
#/
#/ Helper to disable and drain GC operations on a GitHub Enterprise server.
#/
#/ OPTIONS:
#/ -F <configfile> Alternative SSH per-user configuration file.
#/
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Consume any -F <configfile> option; the first other argument is the host
# and ends option parsing.
while true; do
case "$1" in
-F)
opts="$1 $2"
shift 2
;;
*)
host="$1"
shift
break
;;
esac
done
# Show usage with no host
[ -z "$host" ] && print_usage
# Exit early when testing
[ -n "$GHE_TEST_REMOTE_VERSION" ] && exit 0
# gc_disable is a function defined in ghe-backup-config
# Ship the function definition over SSH and run it remotely with the sentinel
# path and cooldown period as arguments (body not visible here — see
# ghe-backup-config for its exact behavior).
echo "set -o pipefail; $(declare -f gc_disable); gc_disable \"$SYNC_IN_PROGRESS_FILE\" \"$GHE_GIT_COOLDOWN_PERIOD\"" | ghe-ssh $opts "$host" -- /bin/bash || {
res=$?
# Exit code 7 indicates GC processes were still running after the cooldown.
if [ $res = 7 ]; then
log_error "Error: Git GC processes remain after $GHE_GIT_COOLDOWN_PERIOD seconds. Aborting..." 1>&2
fi
exit $res
}

Просмотреть файл

@ -1,35 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-gc-enable [<option>...] <host>
#/
#/ Helper to enable GC operations on a GitHub Enterprise server.
#/
#/ OPTIONS:
#/ -F <configfile> Alternative SSH per-user configuration file.
#/
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Consume any leading "-F <configfile>" pairs; the first argument that is
# not -F is taken as the target host.
while [ "$1" = "-F" ]; do
  opts="$1 $2"
  shift 2
done
host="$1"
shift

# Show usage with no host
[ -z "$host" ] && print_usage

# Exit early when testing
[ -n "$GHE_TEST_REMOTE_VERSION" ] && exit 0

# Remove the sync-in-progress sentinel on the remote host, re-enabling
# maintenance (GC) operations. $opts is intentionally unquoted so that an
# empty value vanishes and "-F <file>" splits into two ssh arguments.
ghe-ssh $opts "$host" -- "sudo rm -f '$SYNC_IN_PROGRESS_FILE'"

Просмотреть файл

@ -1,380 +0,0 @@
#!/bin/bash
# ghe-incremental-backup-restore
# contains functions used for incremental backups and restores.
# Not called directly, but rather sourced from other scripts.
# Incremental backups are only supported on backup-utils 3.10 and greater.
# The variables below name the state files (kept under $GHE_DATA_DIR) that
# track the full/incremental backup cycles.
# INC_FULL_BACKUP is a file that tracks the last full backup that we have done for the current incremental backup cycle.
export INC_FULL_BACKUP="inc_full_backup"
# INC_PREVIOUS_FULL_BACKUP is a file that tracks the last full backup that we have done for the previous incremental backup cycle.
# Kept around for a cycle to ensure that we have a rolling window of incremental backups.
export INC_PREVIOUS_FULL_BACKUP="inc_previous_full_backup"
# PRUNE_FULL_BACKUP is a file that tracks the full backups that need to be pruned.
export PRUNE_FULL_BACKUP="prune_inc_previous_full_backup"
# INC_SNAPSHOT_DATA is a file that tracks the incremental backups that we have done for the current incremental backup cycle.
export INC_SNAPSHOT_DATA="inc_snapshot_data"
# INC_PREVIOUS_SNAPSHOT_DATA is a file that tracks the incremental backups that we have done for the previous incremental backup cycle.
export INC_PREVIOUS_SNAPSHOT_DATA="inc_previous_snapshot_data"
# PRUNE_SNAPSHOT_DATA is a file that tracks the incremental backups that need to be pruned.
export PRUNE_SNAPSHOT_DATA="prune_inc_previous_snapshot_data"
# Report whether the incremental backup feature is turned on.
# Echoes "true" when GHE_INCREMENTAL has a non-empty value and "false"
# when it is unset or empty.
is_incremental_backup_feature_on() {
  case "${GHE_INCREMENTAL:-}" in
    "") echo "false" ;;
    *)  echo "true"  ;;
  esac
}
# Do sanity checks on incremental backup configuration.
#
# When incremental backups are enabled ($GHE_INCREMENTAL expands to a
# command that succeeds, i.e. "true"), GHE_INCREMENTAL_MAX_BACKUPS must be
# set to a positive integer. Exits 1 on a configuration error.
incremental_backup_check() {
  if $GHE_INCREMENTAL; then
    if [ -z "$GHE_INCREMENTAL_MAX_BACKUPS" ]; then
      log_error "incremental backups require GHE_INCREMENTAL_MAX_BACKUPS to be set" 1>&2
      exit 1
    fi
    if [ "$GHE_INCREMENTAL_MAX_BACKUPS" -lt 1 ]; then
      # Send this error to stderr like the check above; it previously went
      # to fd 3 (the verbose log stream), hiding the failure reason.
      log_error "GHE_INCREMENTAL_MAX_BACKUPS must be greater than 0" 1>&2
      exit 1
    fi
  fi
}
# Prepare the incremental backup tracking files.
#
# When incremental backups are enabled, ensure both tracking files exist
# in $GHE_DATA_DIR (created empty if missing):
#   - $INC_SNAPSHOT_DATA: incremental backups taken in the current cycle
#   - $INC_FULL_BACKUP:   the full backup the incrementals are based on
incremental_backup_init() {
  if $GHE_INCREMENTAL; then
    local tracking_file
    for tracking_file in "$GHE_DATA_DIR/$INC_SNAPSHOT_DATA" "$GHE_DATA_DIR/$INC_FULL_BACKUP"; do
      if [ ! -f "$tracking_file" ]; then
        touch "$tracking_file"
      fi
    done
  fi
}
# Roll the incremental backup cycle over once it reaches its maximum size.
#
# When the number of snapshots recorded in $INC_SNAPSHOT_DATA reaches
# GHE_INCREMENTAL_MAX_BACKUPS - 1 (one slot is reserved for the full
# backup), archive the current cycle: any existing "inc_previous" data is
# first marked for pruning (set_to_prune), then the current cycle is
# renamed to "inc_previous" (set_to_inc_previous).
check_for_incremental_max_backups(){
  if $GHE_INCREMENTAL; then
    local snapshot_count threshold
    snapshot_count=$(cat "$GHE_DATA_DIR/$INC_SNAPSHOT_DATA" | wc -l)
    threshold=$((GHE_INCREMENTAL_MAX_BACKUPS-1))
    if [ "$snapshot_count" -ge "$threshold" ]; then
      if [ -n "$(ls -d "$GHE_DATA_DIR"/inc_previous* 2>/dev/null)" ]; then
        set_to_prune
      fi
      set_to_inc_previous
    fi
  fi
}
# Print the "to_lsn" value recorded in the xtrabackup_checkpoints file of
# the snapshot directory given as $1.
#
# Exits 1 when the argument is missing, is not a directory, or contains no
# xtrabackup_checkpoints file (errors are logged to fd 3, the verbose
# stream, matching the rest of this file).
retrieve_lsn(){
  local checkpoints lsn
  if $GHE_INCREMENTAL; then
    if [ -z "$1" ]; then
      log_error "retrieve_lsn requires a snapshot directory to be passed in" 1>&3
      exit 1
    fi
    if [ ! -d "$1" ]; then
      log_error "retrieve_lsn requires a valid snapshot directory to be passed in" 1>&3
      exit 1
    fi
    checkpoints="$1/xtrabackup_checkpoints"
    if [ ! -f "$checkpoints" ]; then
      log_error "retrieve_lsn requires a valid xtrabackup_checkpoints file in $1" 1>&3
      exit 1
    fi
    # Line format is "to_lsn = <number>"; the third space-separated field
    # is the LSN itself.
    lsn=$(grep 'to_lsn' < "$checkpoints" | cut -d' ' -f3)
    echo "$lsn"
  fi
}
# Print the LSN that the next incremental backup should be based on.
#
# Uses the most recent snapshot listed in $INC_SNAPSHOT_DATA; when no
# incremental snapshot has been taken yet, falls back to the full backup
# recorded in $INC_FULL_BACKUP. Exits 1 when the snapshot-data file is
# missing.
retrieve_last_lsn(){
  local lsn source_dir
  if $GHE_INCREMENTAL; then
    if [ ! -f "$GHE_DATA_DIR/$INC_SNAPSHOT_DATA" ]; then
      log_error "retrieve_last_lsn requires a valid inc_snapshot_data file in $GHE_DATA_DIR" 1>&3
      exit 1
    fi
    if [ -z "$(cat "$GHE_DATA_DIR/$INC_SNAPSHOT_DATA")" ]; then
      # No incrementals yet: base the LSN on the recorded full backup.
      source_dir=$(cat "$GHE_DATA_DIR/$INC_FULL_BACKUP")
      lsn=$(retrieve_lsn "$source_dir")
      log_info "No incremental backups have been done yet. Using full backup directory $source_dir to get previous lsn ($lsn)" 1>&3
    else
      # The last line of the file is the most recent incremental snapshot.
      source_dir=$(tail -n 1 "$GHE_DATA_DIR/$INC_SNAPSHOT_DATA")
      log_info "Using incremental directory $source_dir to get previous lsn" 1>&3
      lsn=$(retrieve_lsn "$source_dir")
    fi
    echo "$lsn"
  fi
}
# Decide whether the next backup must be "full" or "incremental".
#
# A full backup is required when no full backup has been recorded yet
# (missing or empty $INC_FULL_BACKUP) or when the current cycle already
# holds GHE_INCREMENTAL_MAX_BACKUPS snapshots; otherwise an incremental
# backup is taken. Prints "full" or "incremental".
full_or_incremental_backup(){
  if $GHE_INCREMENTAL; then
    local full_backup_file="$GHE_DATA_DIR/$INC_FULL_BACKUP"
    if [ ! -f "$full_backup_file" ] || [ -z "$(cat "$full_backup_file")" ]; then
      echo "full"
    elif [ "$(cat "$GHE_DATA_DIR/$INC_SNAPSHOT_DATA" | wc -l)" == "$GHE_INCREMENTAL_MAX_BACKUPS" ]; then
      echo "full"
    else
      echo "incremental"
    fi
  fi
}
# Record a snapshot directory in the list of incremental backups taken in
# the current cycle ($GHE_DATA_DIR/$INC_SNAPSHOT_DATA).
#
# $1 - path of the snapshot directory; its basename must exist under
#      $GHE_DATA_DIR. Exits 1 when the argument is missing or invalid.
update_inc_snapshot_data(){
  if $GHE_INCREMENTAL; then
    if [ -z "$1" ]; then
      log_error "update_snapshot_data requires a snapshot directory to be passed in" 1>&3
      exit 1
    fi
    # Quote the basename expansion so snapshot paths containing whitespace
    # are not word-split (matches the quoting used elsewhere in this file).
    INC_DATA_DIR="$GHE_DATA_DIR/$(basename "$1")"
    if [ ! -d "$INC_DATA_DIR" ]; then
      log_error "update_snapshot_data requires a valid snapshot directory to be passed in" 1>&3
      exit 1
    fi
    echo "$1" >> "$GHE_DATA_DIR/$INC_SNAPSHOT_DATA"
  fi
}
# Record $1 as the full backup that the current incremental cycle is based
# on, overwriting $GHE_DATA_DIR/$INC_FULL_BACKUP.
#
# $1 - path of the full backup snapshot directory; its basename must exist
#      under $GHE_DATA_DIR. Exits 1 when the argument is missing or invalid.
update_inc_full_backup(){
  if $GHE_INCREMENTAL; then
    if [ -z "$1" ]; then
      log_error "update_inc_full_backup requires a snapshot directory to be passed in" 1>&3
      exit 1
    fi
    DIR="$GHE_DATA_DIR/$(basename "$1")"
    [ -d "$DIR" ] || {
      log_error "update_inc_full_backup requires a valid snapshot directory to be passed in" 1>&3
      exit 1
    }
    echo "$1" > "$GHE_DATA_DIR/$INC_FULL_BACKUP"
  fi
}
# validate the inc_snapshot_data file. For each snapshot directory in the file,
# the directory should exist and its lsn retrieved from xtrabackups_checkpoint
# should be lower than the next snapshot directory in the file. If the lsn is
# not lower, then we have a problem and we warn the user and tell them to perform
# a full backup.
validate_inc_snapshot_data(){
  if $GHE_INCREMENTAL; then
    local snapshot_data full_data lines snapshot_data_array snapshot_data_array_length i
    snapshot_data=$(cat "$GHE_DATA_DIR/$INC_SNAPSHOT_DATA")
    # With no incrementals recorded yet, validate the full backup instead.
    if [ -z "$snapshot_data" ]; then
      log_info "no incremental snapshots yet, will make first incremental from full backup" 1>&3
      full_data=$(cat "$GHE_DATA_DIR/$INC_FULL_BACKUP")
      log_info "validating full backup $full_data" 1>&3
      snapshot_data="$full_data"
    fi
    readarray -t snapshot_data_array <<< "$snapshot_data"
    snapshot_data_array_length=${#snapshot_data_array[@]}
    log_info "$snapshot_data_array_length snapshot directories found in inc_snapshot_data"
    i=0
    # I would normally use a for loop here, but I need to utilize before
    # and after values of the array index to compare the lsn values
    while [ $i -lt "$snapshot_data_array_length" ]; do
      # if this is the first snapshot directory, we don't need to compare
      # it to the previous snapshot directory
      # NOTE(review): this guard is always true inside the loop (the loop
      # only runs while i < length, so length >= 1 here), which makes the
      # else branch below look unreachable — and if it were ever reached,
      # i is not incremented there, so it would loop forever. The intent
      # was possibly to test "$i" rather than the length; confirm before
      # relying on the else path.
      if [ "$snapshot_data_array_length" -gt 0 ]; then
        local snapshot_dir
        snapshot_dir=${snapshot_data_array[$i]}
        # Every listed snapshot must still exist on disk.
        if [ ! -d "$snapshot_dir" ]; then
          log_error "snapshot directory $snapshot_dir does not exist" 1>&3
          exit 1
        fi
        local lsn next_lsn
        log_info "retrieving lsn for snapshot directory $snapshot_dir" 1>&3
        lsn=$(retrieve_lsn "$snapshot_dir")
        # Compare with the following entry (when there is one): LSNs must
        # be strictly increasing for the incremental chain to be valid.
        if [ $i -lt $((snapshot_data_array_length-1)) ]; then
          local next_snapshot_dir
          next_snapshot_dir=${snapshot_data_array[$((i+1))]}
          next_lsn=$(retrieve_lsn "$next_snapshot_dir")
          if [ "$lsn" -ge "$next_lsn" ]; then
            log_error "snapshot directory $snapshot_dir has an lsn of $lsn which is greater than or equal to the next snapshot directory $next_snapshot_dir with an lsn of $next_lsn" 1>&2
            log_error "incremental backups are invalid. Please perform a full backup" 1>&3
            exit 1
          fi
          log_info "$snapshot_dir lsn = $lsn, $next_snapshot_dir lsn = $next_lsn" 1>&3
        fi
        i=$((i+1))
      else
        log_info "retrieving lsn for snapshot directory $snapshot_data" 1>&3
        lsn=$(retrieve_lsn "$snapshot_data")
        log_info "$snapshot_data is the only incremental snapshot, lsn=$lsn" 1>&3
      fi
    done
  fi
}
# Print "true" when $1 is the full backup path recorded in
# $GHE_DATA_DIR/$INC_FULL_BACKUP, "false" otherwise.
#
# $1 - snapshot directory path; its basename must exist under
#      $GHE_DATA_DIR. Exits 1 when the argument is missing or invalid.
is_full_backup(){
  local recorded_full
  if [ -z "$1" ]; then
    log_error "is_full_backup requires a snapshot directory to be passed in, received $1" 1>&3
    exit 1
  fi
  BACKUP_DIR="$GHE_DATA_DIR/$(basename "$1")"
  if [ ! -d "$BACKUP_DIR" ]; then
    log_error "is_full_backup requires a valid snapshot directory to be passed in, received $1" 1>&3
    exit 1
  fi
  recorded_full=$(cat "$GHE_DATA_DIR/$INC_FULL_BACKUP")
  if [ "$1" = "$recorded_full" ]; then
    echo "true"
  else
    echo "false"
  fi
}
# Print the basename of the full backup directory recorded in
# $GHE_DATA_DIR/$INC_FULL_BACKUP (the file should only ever hold one line).
get_full_backup(){
  if $GHE_INCREMENTAL; then
    backup_dir="$(cat "$GHE_DATA_DIR/$INC_FULL_BACKUP")"
    basename "$backup_dir"
  fi
}
# retrieve the incremental backups in the list up to and including the passed in
# snapshot directory. If the snapshot directory is not in the list, then we
# return a blank string.
# NOTE(review): the implementation does not match the last sentence above —
# when $1 never matches a line of inc_snapshot_data, the loop accumulates
# and returns ALL entries instead of a blank string. Confirm which behavior
# callers actually expect before changing either the code or this comment.
get_incremental_backups(){
  if $GHE_INCREMENTAL; then
    if [ -z "$1" ]; then
      log_error "get_incremental_backups requires a snapshot directory to be passed in" 1>&3
      exit 1
    fi
    if [ ! -d "$1" ]; then
      log_error "get_incremental_backups requires a valid snapshot directory to be passed in" 1>&3
      exit 1
    fi
    local incremental_backups
    incremental_backups=""
    snapshot_data=$(cat "$GHE_DATA_DIR/$INC_SNAPSHOT_DATA")
    # Collect basenames in file order, stopping after the requested dir.
    while IFS= read -r line; do
      if [[ "$line" == "$1" ]]; then
        incremental_backups="$incremental_backups $(basename "$line")"
        break
      fi
      incremental_backups="$incremental_backups $(basename "$line")"
    done <<< "$snapshot_data"
    echo "$incremental_backups"
  fi
}
# Print the shell command that reads the MySQL xtrabackup checkpoints file
# (/tmp/lsndir/xtrabackup_checkpoints) for the appliance at $1.
#
# In a cluster (the remote /etc/github/cluster file exists) where this node
# is not the mysql-master, the returned command ssh-hops to the master
# first; otherwise the file is read directly on the target host.
get_cluster_lsn(){
  local GHE_HOSTNAME
  GHE_HOSTNAME=$1
  # NOTE(review): $LOCAL_MYSQL is expanded on the client side (the test
  # string is double-quoted before being sent over ssh), so this checks
  # the local setting, not a remote one — confirm intended.
  ghe-ssh "$GHE_HOSTNAME" "[ -f /etc/github/cluster ] && [ -z \"$LOCAL_MYSQL\" ]"
  # NOTE(review): under `set -e` in a sourcing script, a non-zero exit from
  # the ghe-ssh test above would abort before this $? check runs — confirm
  # callers tolerate that.
  if [ $? -eq 0 ]; then
    local_host=$(ghe-ssh "$GHE_HOSTNAME" "cat /etc/github/cluster")
    mysql_master=$(ghe-ssh "$GHE_HOSTNAME" "ghe-config cluster.mysql-master")
    # Hop to the mysql master when this node is not it.
    if [ "$local_host" != "$mysql_master" ]; then
      echo "ssh -p 122 admin@$mysql_master -- sudo cat /tmp/lsndir/xtrabackup_checkpoints"
    else
      echo "sudo cat /tmp/lsndir/xtrabackup_checkpoints"
    fi
  else
    echo "sudo cat /tmp/lsndir/xtrabackup_checkpoints"
  fi
}
# Archive the current incremental backup cycle as the "previous" cycle.
#
# Renames the full backup directory and every incremental snapshot
# directory recorded in $INC_FULL_BACKUP / $INC_SNAPSHOT_DATA by prepending
# "inc_previous_" to their basenames, records the new locations in
# $INC_PREVIOUS_FULL_BACKUP / $INC_PREVIOUS_SNAPSHOT_DATA, and removes the
# current-cycle tracking files.
set_to_inc_previous(){
  log_info "setting previous incremental backups" 1>&3
  full_backup=$(cat "$GHE_DATA_DIR/$INC_FULL_BACKUP")
  snapshot_data=$(cat "$GHE_DATA_DIR/$INC_SNAPSHOT_DATA")
  if [ -n "$full_backup" ]; then
    inc_previous_full_backup_dir="inc_previous_$(basename "$full_backup")"
    log_info "moving $full_backup to $GHE_DATA_DIR/$inc_previous_full_backup_dir" 1>&3
    mv "$full_backup" "$GHE_DATA_DIR/$inc_previous_full_backup_dir"
    echo "$GHE_DATA_DIR/$inc_previous_full_backup_dir" > "$GHE_DATA_DIR/$INC_PREVIOUS_FULL_BACKUP"
    log_info "removing $GHE_DATA_DIR/$INC_FULL_BACKUP" 1>&3
    rm -f "$GHE_DATA_DIR/$INC_FULL_BACKUP"
  fi
  if [ -n "$snapshot_data" ]; then
    while IFS= read -r line; do
      local inc_previous_snapshot_dir
      inc_previous_snapshot_dir="inc_previous_$(basename "$line")"
      # $line is already an absolute snapshot path; log it as-is (the
      # message previously prepended $GHE_DATA_DIR to it, producing a
      # bogus source path in the verbose log).
      log_info "moving $line to $GHE_DATA_DIR/$inc_previous_snapshot_dir" 1>&3
      mv "$line" "$GHE_DATA_DIR/$inc_previous_snapshot_dir"
      echo "$GHE_DATA_DIR/$inc_previous_snapshot_dir" >> "$GHE_DATA_DIR/$INC_PREVIOUS_SNAPSHOT_DATA"
    done <<< "$snapshot_data"
    log_info "removing $GHE_DATA_DIR/$INC_SNAPSHOT_DATA" 1>&3
    rm -f "$GHE_DATA_DIR/$INC_SNAPSHOT_DATA"
  fi
}
# set directories prepended with "inc_previous" to be prepended with prune
# this enables the directories to be pruned by ghe-snapshot.
# Will prepend prune to each inc_previous folder in $GHE_DATA_DIR
# and will remove $GHE_DATA_DIR/inc_previous_full_backup and
# will remove $GHE_DATA_DIR/inc_previous_snapshot_data
set_to_prune(){
  log_info "setting previous incremental backups to be pruned" 1>&3
  previous_full_backup=$(cat "$GHE_DATA_DIR/$INC_PREVIOUS_FULL_BACKUP")
  previous_snapshot_data=$(cat "$GHE_DATA_DIR/$INC_PREVIOUS_SNAPSHOT_DATA")
  if [ -n "$previous_full_backup" ]; then
    prune_full_backup_dir="prune_$(basename "$previous_full_backup")"
    log_info "moving $GHE_DATA_DIR/$previous_full_backup to $GHE_DATA_DIR/$prune_full_backup_dir" 1>&3
    mv "$previous_full_backup" "$GHE_DATA_DIR/$prune_full_backup_dir"
    # NOTE(review): this renames the previous-cycle tracking file into the
    # prune tracking file and THEN appends the new prune path below, so
    # $PRUNE_FULL_BACKUP ends up holding the stale inc_previous_* path as
    # well as the prune_* path. The pruner appears to glob prune*
    # directories rather than read this file, so the stale entry may be
    # harmless — confirm.
    mv "$GHE_DATA_DIR/$INC_PREVIOUS_FULL_BACKUP" "$GHE_DATA_DIR/$PRUNE_FULL_BACKUP"
    # NOTE(review): nothing is removed by the next statement despite the
    # wording of this message.
    log_info "removing $GHE_DATA_DIR/inc_previous_full_backup" 1>&3
    echo "$GHE_DATA_DIR/$prune_full_backup_dir" >> "$GHE_DATA_DIR/$PRUNE_FULL_BACKUP"
  fi
  if [ -n "$previous_snapshot_data" ]; then
    while IFS= read -r line; do
      local prune_snapshot_dir
      prune_snapshot_dir="prune_$(basename "$line")"
      # NOTE(review): $line is already an absolute path; the destination in
      # this message should read $GHE_DATA_DIR/$prune_snapshot_dir (the
      # actual mv target below), not "$GHE_DATA_DIR/prune_$line".
      log_info "moving $GHE_DATA_DIR/$line to $GHE_DATA_DIR/prune_$line" 1>&3
      mv "$line" "$GHE_DATA_DIR/$prune_snapshot_dir"
      echo "$GHE_DATA_DIR/$prune_snapshot_dir" >> "$GHE_DATA_DIR/$PRUNE_SNAPSHOT_DATA"
    done <<< "$previous_snapshot_data"
    log_info "removing $GHE_DATA_DIR/$INC_PREVIOUS_SNAPSHOT_DATA" 1>&3
    rm -f "$GHE_DATA_DIR/$INC_PREVIOUS_SNAPSHOT_DATA"
  fi
}
# Debug helper: log the incremental tracking file names in use.
test_restore_output(){
  local tracking_file
  for tracking_file in "$INC_FULL_BACKUP" "$INC_SNAPSHOT_DATA"; do
    log_info "$tracking_file"
  done
}

Просмотреть файл

@ -1,30 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-maintenance-mode-status <host>
#/ Checks the status of maintenance mode on GitHub appliance at <host>.
#/ Exits 0 when the maintenance page is present on the remote host,
#/ non-zero otherwise.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Parse args: this command takes no options, so any leading-dash argument
# is an error.
while true; do
  case "$1" in
    -*)
      # Report this script's own name — the message previously said
      # "ghe-maintenance-mode-enable", copied from a sibling script.
      echo "ghe-maintenance-mode-status: illegal argument: $1" 1>&2
      exit 1
      ;;
    *)
      break
      ;;
  esac
done

# Show usage and bail with no arguments
[ -z "$*" ] && print_usage

# Grab host arg
host="$1"

# Check if the maintenance page is present
ghe-ssh "$host" -- test -e "$GHE_REMOTE_DATA_DIR/github/current/public/system/maintenance.html"

Просмотреть файл

@ -1,130 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-prune-snapshots
#/ Keep N latest backup snapshots.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Once we start pruning, this backup will no longer be valid.
# So create or preserve its `incomplete` file and remove the
# `incomplete` file last.
#
# Reads snapshot directory paths from stdin, one per line, and deletes
# each one. Directories listed in inc_full_backup / inc_snapshot_data
# (part of an active incremental chain) are skipped.
prune_snapshot() {
  local prune_dir
  while read prune_dir; do
    # An empty line terminates processing entirely (return, not continue).
    [ -n "$prune_dir" ] || return
    # ignore any directory that is included in inc_full_backup or inc_snapshot_data
    # the files should be filtered out earlier, but this is a safeguard to make sure.
    # inc_previous_* and prune_inc_previous are ignored by default
    if [ -f "$GHE_DATA_DIR/inc_full_backup" ]; then
      if grep -q "$prune_dir" "$GHE_DATA_DIR"/inc_full_backup; then
        log_info "Skipping incremental backup directory: $prune_dir" 1>&3
        continue
      fi
    fi
    if [ -f "$GHE_DATA_DIR/inc_snapshot_data" ]; then
      if grep -q "$prune_dir" "$GHE_DATA_DIR"/inc_snapshot_data; then
        log_info "Skipping incremental backup directory: $prune_dir" 1>&3
        continue
      fi
    fi
    # skip if the directory is not a directory or blank
    if [ ! -d "$prune_dir" ] || [ -z "$prune_dir" ]; then
      log_info "Skipping blank or non-directory: $prune_dir" 1>&3
      continue
    fi
    # Track these steps as they can be difficult to track down if they fail.
    # NOTE(review): with `set -e` in effect, a failing touch/find would
    # abort before the $? checks below run, so these failure log lines may
    # only fire when errexit is not active — confirm.
    log_info "Pruning directory $prune_dir" 1>&3
    touch "$prune_dir/incomplete"
    if [ $? -ne 0 ]; then
      log_info "Failed to create $prune_dir/incomplete" 1>&3
    fi
    # Delete everything except the `incomplete` marker first, so a partial
    # prune still looks incomplete rather than valid.
    find "$prune_dir" -mindepth 1 -maxdepth 1 -not -path "$prune_dir/incomplete" -print0 | xargs -0 rm -rf
    if [ $? -ne 0 ] ; then
      log_info "Failed to prune $prune_dir" 1>&3
    fi
    rm -rf "$prune_dir"
    if [ $? -ne 0 ]; then
      log_info "Failed to remove $prune_dir" 1>&3
    fi
  done
}
# Remove incremental snapshot directories marked for pruning (those whose
# names start with "prune_").
#
# Reads directory paths from stdin, one per line. Existing directories are
# first marked with an "incomplete" sentinel and emptied (everything except
# the sentinel), then the directory itself is removed. Non-directory paths
# still go through the final rm -rf, which is a no-op when absent.
prune_incremental_snapshot() {
  local doomed_dir
  while read doomed_dir; do
    if [ -d "$doomed_dir" ]; then
      # Flag the snapshot invalid before deleting contents, so a crash
      # mid-prune leaves it marked incomplete rather than looking valid.
      touch "$doomed_dir/incomplete"
      find "$doomed_dir" -mindepth 1 -maxdepth 1 -not -path "$doomed_dir/incomplete" -print0 | xargs -0 rm -rf
    fi
    rm -rf "$doomed_dir"
  done
}
# Prune if backup is not running.
# Pruning is allowed when invoked from ghe-backup itself (which owns the
# in-progress file) or when no backup/restore in-progress marker exists.
inprogress_file=$(find "$GHE_DATA_DIR" -maxdepth 1 -type f \( -name "in-progress" -o -name "in-progress-restore" \) -print -quit)
if [[ "$CALLING_SCRIPT" == "ghe-backup" ]] || [ -z "$inprogress_file" ]; then
  # First prune all incomplete / failed snapshot directories
  prune_dirs="$(ls -1 "$GHE_DATA_DIR"/[0-9]*/incomplete 2>/dev/null || true)"
  prune_num=$(echo "$prune_dirs" | grep -v '^$' | wc -l)
  incremental_prune_dirs="$(ls -1 "$GHE_DATA_DIR"/prune* 2>/dev/null || true)"
  if [ "$prune_num" -gt 0 ]; then
    log_info "Pruning $prune_num failed snapshot(s) ..."
    echo "$prune_dirs" | sed 's@/incomplete$@@' | prune_snapshot
  fi

  # Now prune all expired snapshots. Keep GHE_NUM_SNAPSHOTS around.
  snapshot_count=$(ls -1d "$GHE_DATA_DIR"/[0-9]* 2>/dev/null | wc -l)
  if [ "$snapshot_count" -gt "$GHE_NUM_SNAPSHOTS" ]; then
    # Get the list of directories that need pruning
    dirs_to_prune=""
    if [ -f "$GHE_DATA_DIR/inc_full_backup" ]; then
      # Read the list of directories from inc_full_backup file into the exclude_list
      exclude_list=$(cat "$GHE_DATA_DIR"/inc_full_backup | tr '\n' ' ')
      # Add inc_snapshot_data directory to the exclude_list
      exclude_list+=" $(cat "$GHE_DATA_DIR"/inc_snapshot_data)"
      log_info "Excluding directories from pruning: $exclude_list" 1>&3
      scan_dirs="$(ls -1d "$GHE_DATA_DIR"/[0-9]*)"
      log_info "Scanning directories: $scan_dirs" 1>&3
      dirs_to_prune=$(echo "$scan_dirs" | grep -v -F "$exclude_list" | sort -r | awk "NR > $GHE_NUM_SNAPSHOTS")
    else
      dirs_to_prune="$(ls -1d "$GHE_DATA_DIR"/[0-9]* | sort -r | awk "NR>$GHE_NUM_SNAPSHOTS")"
    fi
    # Count the number of directories to be pruned
    prune_num=$(echo "$dirs_to_prune" | grep -c '^')
    log_info "Pruning $prune_num expired snapshot(s) ..."
    log_info "Pruning directories: $dirs_to_prune" 1>&3
    echo "$dirs_to_prune" | prune_snapshot
  fi

  # Prune incremental snapshots afterward
  incremental_snapshot_count=$(ls -1d "$GHE_DATA_DIR"/prune* 2>/dev/null | wc -l)
  if [ "$incremental_snapshot_count" -gt 0 ]; then
    incremental_prune_dirs="$(ls -1d "$GHE_DATA_DIR"/prune*)"
    log_info "Pruning $incremental_snapshot_count stale incremental backups..."
    echo "$incremental_prune_dirs" | prune_incremental_snapshot
  fi
elif [ "$CALLING_SCRIPT" != "ghe-backup" ] && [ -n "$inprogress_file" ]; then
  log_info "Detected a running backup/restore process, please wait until that process is complete to prune expired/incomplete snapshots." 1>&2
  # Single-quote the path pattern inside the message so it is shown
  # literally; the previous nested double quotes left $GHE_DATA_DIR/in-progress*
  # unquoted, subjecting it to glob expansion and word splitting.
  log_info "If no such process is running, please remove the '$GHE_DATA_DIR/in-progress*' file and retry again." 1>&2
fi

Просмотреть файл

@ -1,68 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-actions <host>
#/ Restore additional Actions files from an rsync snapshot.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Show usage and bail with no arguments
[ -z "$*" ] && print_usage

bm_start "$(basename $0)"

# Grab host arg
GHE_HOSTNAME="$1"

# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}

# Path to snapshot dir we're restoring from
GHE_RESTORE_SNAPSHOT_PATH="$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT"

port=$(ssh_port_part "$GHE_HOSTNAME")
host=$(ssh_host_part "$GHE_HOSTNAME")

# No need to restore anything, early exit
if [ ! -d "$GHE_RESTORE_SNAPSHOT_PATH/actions" ]; then
  log_warn "Warning: Actions backup missing. Skipping ..."
  exit 0
fi

# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$host"

# Transfer all Actions data from the snapshot to the user data directory using rsync.
ghe_verbose "* Transferring Actions files to $host ..."

# Ensure the target directory exists and is owned by the actions user
# before rsync runs there as that user.
ghe-ssh -p "$port" "$host" -- sudo mkdir -p "$GHE_REMOTE_DATA_USER_DIR/actions"
ghe-ssh -p "$port" "$host" -- sudo chown -R actions:actions "$GHE_REMOTE_DATA_USER_DIR/actions"

log_rsync "BEGIN: actions rsync" 1>&3
# -R (--relative) with the "/./" anchor preserves the path components below
# the snapshot's actions/ directory on the remote side.
ghe-rsync -arvHR --delete \
  -e "ghe-ssh -p $port" \
  --rsync-path='sudo -u actions rsync' \
  "$GHE_RESTORE_SNAPSHOT_PATH/actions/./" \
  "$host:$GHE_REMOTE_DATA_USER_DIR/actions/" 1>&3
log_rsync "END: actions rsync" 1>&3

# Restore Actions settings.
ghe_verbose "* Restoring Actions settings to $host ..."

# Setup the database logins.
ghe_verbose "* Restoring database logins and users to $host ..."
ghe-ssh -p "$port" "$host" -- ghe-actions-console -s mps -c "Repair-DatabaseLogins"
ghe-ssh -p "$port" "$host" -- ghe-actions-console -s token -c "Repair-DatabaseLogins"
ghe-ssh -p "$port" "$host" -- ghe-actions-console -s actions -c "Repair-DatabaseLogins"

# Only repair the ArtifactCache logins when its database backup is present
# in the snapshot's mssql directory.
if [ ! -z "$(find "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/mssql/" -maxdepth 1 -name 'ArtifactCache_Configuration*.bak')" ]; then
  ghe-ssh -p "$port" "$host" -- ghe-actions-console -s artifactcache -c "Repair-DatabaseLogins"
else
  log_info "ArtifactCache is not present in mssql backup. Skipping Repair-DatabaseLogins for it."
fi

bm_end "$(basename $0)"

Просмотреть файл

@ -1,76 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-es-audit-log
#/ Restores a backup of audit logs to Elasticsearch.
#/
#/ Note: This command typically isn't called directly. It's invoked by
#/ ghe-restore.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Show usage and bail with no arguments
[ $# -lt 1 ] && print_usage

bm_start "$(basename $0)"

GHE_HOSTNAME="$1"

# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$GHE_HOSTNAME"

# Index names are the .gz basenames in the snapshot's audit-log directory.
indices=$(find $GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/audit-log/*.gz -print0 2>/dev/null | xargs -0 -I{} -n1 basename {} .gz)

# Platform neutral and robust method of determining last month
this_yr=$(date +"%Y")
this_mth=$(date +"%-m")
last_mth=$(( $this_mth - 1 ))
last_yr=$this_yr
if [ "$last_mth" = 0 ]; then
  last_mth=12
  last_yr=$(( $this_yr - 1 ))
fi

# Regexes matching audit_log index names for last month and this month.
last_month=$(printf "audit_log(-[0-9]+)?-%4d-%02d(-[0-9]+)?" $last_yr $last_mth)
current_month=$(printf "audit_log(-[0-9]+)?-%4d-%02d(-[0-9]+)?" $this_yr $this_mth)

tmp_list="$(mktemp -t backup-utils-restore-XXXXXX)"

if is_instance_configured; then
  configured=true
fi

# Only restore indices that don't exist and the last two months' indices.
for index in $indices; do
  if ! ghe-ssh "$GHE_HOSTNAME" "curl -f -s -XGET http://localhost:9201/$index > /dev/null" || [[ $index =~ $last_month ]] || [[ $index =~ $current_month ]]; then
    echo "$index.gz" >> $tmp_list
  fi
done

if [ -s "$tmp_list" ]; then
  # Stage the selected archives on the appliance under elasticsearch-restore,
  # owned by the elasticsearch user so rsync can write there.
  ghe-ssh "$GHE_HOSTNAME" -- "sudo mkdir -p '$GHE_REMOTE_DATA_USER_DIR/elasticsearch-restore'" 1>&3
  ghe-ssh "$GHE_HOSTNAME" -- "sudo chown elasticsearch:elasticsearch '$GHE_REMOTE_DATA_USER_DIR/elasticsearch-restore'" 1>&3
  log_rsync "BEGIN: es-audit log rsync" 1>&3
  ghe-rsync -av --delete \
    -e "ghe-ssh -p $(ssh_port_part "$GHE_HOSTNAME")" \
    --rsync-path="sudo -u elasticsearch rsync" \
    --files-from=$tmp_list \
    "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/audit-log/" \
    "$(ssh_host_part "$GHE_HOSTNAME"):$GHE_REMOTE_DATA_USER_DIR/elasticsearch-restore/audit-log/" 1>&3
  log_rsync "END: es-audit log rsync" 1>&3
  # On clusters and configured instances, load each archive into ES via
  # ghe-es-load-json now; otherwise leave the files staged for later.
  if $CLUSTER || [ -n "$configured" ]; then
    for index in $(cat $tmp_list | sed 's/\.gz$//g'); do
      ghe_verbose "* Restoring $index"
      echo "export PATH=\$PATH:/usr/local/share/enterprise && sudo gzip -dc $GHE_REMOTE_DATA_USER_DIR/elasticsearch-restore/audit-log/$index | ghe-es-load-json 'http://localhost:9201/$index'" |
      ghe-ssh "$GHE_HOSTNAME" -- /bin/bash 1>&3
    done
  else
    ghe-ssh "$GHE_HOSTNAME" -- "sudo sh -c 'mv $GHE_REMOTE_DATA_USER_DIR/elasticsearch-restore/audit-log/* $GHE_REMOTE_DATA_USER_DIR/elasticsearch-restore/'" 1>&3
  fi
  # Clean up the remote staging directory and the local temp list.
  ghe-ssh "$GHE_HOSTNAME" -- "sudo sh -c 'rm -rf $GHE_REMOTE_DATA_USER_DIR/elasticsearch-restore/audit-log/'" 1>&3
  rm $tmp_list
fi

bm_end "$(basename $0)"

Просмотреть файл

@ -1,49 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-es-rsync <host>
#/ Restore an rsync snapshot of all Elasticsearch data to a GitHub instance.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command when the rsync strategy is used.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Show usage and bail with no arguments
[ -z "$*" ] && print_usage

bm_start "$(basename $0)"

# Grab host arg
GHE_HOSTNAME="$1"

# Perform a host-check and establish the remote version in GHE_REMOTE_VERSION.
ghe_remote_version_required "$GHE_HOSTNAME"

# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}

# The directory holding the snapshot to restore
snapshot_dir="$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT"

# Transfer all ES data from the latest snapshot to the GitHub instance.
if [ ! -d "$snapshot_dir/elasticsearch" ]; then
  # Use log_warn for consistency with the other ghe-restore-* scripts
  # (this previously used a bare echo, bypassing the logging helpers).
  log_warn "Warning: Elasticsearch backup missing. Skipping ..."
  exit 0
else
  # Stage into elasticsearch-restore, owned by the elasticsearch user so
  # the remote rsync (run via sudo -u elasticsearch) can write there.
  ghe-ssh "$GHE_HOSTNAME" -- "sudo mkdir -p '$GHE_REMOTE_DATA_USER_DIR/elasticsearch-restore'" 1>&3
  ghe-ssh "$GHE_HOSTNAME" -- "sudo chown elasticsearch:elasticsearch '$GHE_REMOTE_DATA_USER_DIR/elasticsearch-restore'" 1>&3
  log_rsync "BEGIN: elasticsearch rsync" 1>&3
  # --copy-dest lets rsync copy unchanged files locally from the live ES
  # data directory instead of transferring them over the wire.
  ghe-rsync -av --delete \
    -e "ghe-ssh -p $(ssh_port_part "$GHE_HOSTNAME")" \
    --rsync-path="sudo -u elasticsearch rsync" \
    --copy-dest="$GHE_REMOTE_DATA_USER_DIR/elasticsearch" \
    "$snapshot_dir/elasticsearch/" \
    "$(ssh_host_part "$GHE_HOSTNAME"):$GHE_REMOTE_DATA_USER_DIR/elasticsearch-restore" 1>&3
  log_rsync "END: elasticsearch rsync" 1>&3
fi

bm_end "$(basename $0)"

Просмотреть файл

@ -1,44 +0,0 @@
#!/usr/bin/env bash
# Usage: ghe-restore-external-database-compatibility-check
# GitHub Enterprise checks for external-database related restores.
#
# Validates that the combination of snapshot type (internal vs external
# MySQL) and target appliance configuration is supported, exiting 1 with
# an explanatory message when it is not.

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

set -e

# Always allow restoring to unconfigured appliances.
# Additional checks are required if the instance is configured.
if is_instance_configured; then
  if internal_database_snapshot_to_external_database; then
    # Restoring settings in this scenario would change BYODB state, which is not supported via backup-utils.
    if $RESTORE_SETTINGS; then
      log_error "Restoring the settings of a snapshot from an appliance using the bundled MySQL service to an appliance using an externally-managed MySQL service is not supported. Please reconfigure the appliance first, then run ghe-restore again."
      exit 1
    fi

    # Restoring internal DB snapshot to BYODB appliance without passing in --skip-mysql is not supported.
    if ! $SKIP_MYSQL; then
      log_error "Restoring a snapshot from an appliance using the bundled MySQL service to an appliance using an externally-managed MySQL service is not supported. Please migrate the MySQL data beforehand, then run ghe-restore again, passing in the --skip-mysql flag."
      exit 1
    fi
  fi

  if external_database_snapshot_to_internal_database; then
    # Restoring settings in this scenario would change BYODB state, which is not supported via backup-utils.
    if $RESTORE_SETTINGS; then
      log_error "Restoring the settings of a snapshot from an appliance using an externally-managed MySQL service to an appliance using the bundled MySQL service is not supported. Please reconfigure the appliance first, then run ghe-restore again."
      exit 1
    fi

    # Restoring BYODB snapshot to internal DB appliance without passing in --skip-mysql is not supported.
    # Use log_error for consistency with the parallel failure branches
    # above (this previously used a bare echo).
    if ! $SKIP_MYSQL; then
      log_error "Restoring a snapshot from an appliance using an externally-managed MySQL service to an appliance using the bundled MySQL service is not supported. Please migrate the MySQL data beforehand, then run ghe-restore again, passing in the --skip-mysql flag."
      exit 1
    fi
  fi
fi

Просмотреть файл

@ -1,99 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-git-hooks <host>
#/ Restore custom Git hooks data from an rsync snapshot
#/
#/ Note: This command typically isn't called directly. It's invoked by
#/ ghe-restore.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Show usage and bail with no arguments
[ -z "$*" ] && print_usage

bm_start "$(basename $0)"

# Grab host arg
GHE_HOSTNAME="$1"

# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$GHE_HOSTNAME"

# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}

# Split host:port into parts
port=$(ssh_port_part "$GHE_HOSTNAME")
host=$(ssh_host_part "$GHE_HOSTNAME")

# Add user / -l option
user="${host%@*}"
[ "$user" = "$host" ] && user="admin"

hostnames=$host
tempdir=$(mktemp -d -t backup-utils-restore-XXXXXX)
ssh_config_file_opt=
opts="$GHE_EXTRA_SSH_OPTS"

# In cluster mode, restore to every git-server node using a generated ssh
# config that maps node names to their connection details.
if $CLUSTER; then
  ssh_config_file="$tempdir/ssh_config"
  ssh_config_file_opt="-F $ssh_config_file"
  opts="$opts -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no"
  hostnames=$(ghe-cluster-find-nodes "$GHE_HOSTNAME" "git-server")
  ghe-ssh-config "$GHE_HOSTNAME" "$hostnames" > "$ssh_config_file"
fi

# Kill any still-running background rsyncs and drop the temp dir on exit.
cleanup() {
  for pid in $(jobs -p); do
    kill -KILL $pid > /dev/null 2>&1 || true
  done
  rm -rf $tempdir
}
trap 'cleanup' INT TERM EXIT

# Restore hook environment tarballs (to the first node only), then
# re-register each environment via ghe-hook-env-update.
if [ -d "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/git-hooks/environments/tarballs" ]; then
  tarballs=$(cd $GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/git-hooks/environments/tarballs && find . -type f)
  hostname=$(echo $hostnames | awk '{ print $1; }')
  if [ -n "$hostname" ]; then
    ghe-ssh $ssh_config_file_opt -l $user "$hostname:122" -- "sudo -u git mkdir -p $GHE_REMOTE_DATA_USER_DIR/git-hooks/environments/tarballs"
    log_rsync "BEGIN: git-hooks tarball rsync" 1>&3
    ghe-rsync -avH --delete \
      -e "ssh -q $opts -p $port $ssh_config_file_opt -l $user" \
      --rsync-path="sudo -u git rsync" \
      "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/git-hooks/environments/tarballs/" \
      "$hostname:$GHE_REMOTE_DATA_USER_DIR/git-hooks/environments/tarballs" 1>&3
    log_rsync "END: git-hooks rsync" 1>&3
    for tarball in $tarballs; do
      # Tarball paths look like ./<env_id>/...; field 2 is the env id.
      env_id=$(echo $tarball | cut -d '/' -f 2)
      ghe-ssh $ssh_config_file_opt -l $user "$hostname:122" -- "/bin/bash -c 'export PATH=\$PATH:/usr/local/share/enterprise && ghe-hook-env-update $env_id $GHE_REMOTE_DATA_USER_DIR/git-hooks/environments/tarballs/$tarball'" 1>&3 2>&3
    done
  fi
fi

# Restore the hook repositories to every node, one background rsync per host.
if [ -d "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/git-hooks/repos" ]; then
  for hostname in $hostnames; do
    ghe-ssh $ssh_config_file_opt -l $user "$hostname:122" -- "sudo -u git mkdir -p $GHE_REMOTE_DATA_USER_DIR/git-hooks/repos"
    log_rsync "BEGIN: git-hooks repos rsync" 1>&3
    # NOTE(review): this rsync runs in the background (&), so the END log
    # line below is emitted immediately, before the transfer finishes.
    ghe-rsync -avH --delete \
      -e "ssh -q $opts -p $port $ssh_config_file_opt -l $user" \
      --rsync-path="sudo -u git rsync" \
      "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/git-hooks/repos/" \
      "$hostname:$GHE_REMOTE_DATA_USER_DIR/git-hooks/repos" 1>&3 &
    log_rsync "END: git-hooks repos rsync" 1>&3
  done
  # Wait for all background rsyncs and propagate the first failure.
  # NOTE(review): with `set -e`, a non-zero `wait $pid` would abort before
  # ret_code is examined — confirm the echo/exit path is reachable.
  for pid in $(jobs -p); do
    wait $pid
    ret_code=$?
    if [ "$ret_code" != "0" ]; then
      echo "$pid exited $ret_code"
      exit $ret_code
    fi
  done
fi

bm_end "$(basename $0)"

Просмотреть файл

@ -1,57 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-minio <host>
#/ Restore additional minio files from an rsync snapshot.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command.
set -e

# Load the backup configuration and shared helpers
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$(dirname "${BASH_SOURCE[0]}")/ghe-backup-config"

# Bail out with usage when invoked without arguments
[[ -z ${*} ]] && print_usage

bm_start "$(basename "${0}")"

# Target host argument
GHE_HOSTNAME="${1}"

# ghe-restore normally exports the snapshot to restore; default to
# "current" so this script can also be run standalone.
: "${GHE_RESTORE_SNAPSHOT:=current}"

# Local directory of the snapshot we restore from
GHE_RESTORE_SNAPSHOT_PATH="${GHE_DATA_DIR}/${GHE_RESTORE_SNAPSHOT}"

host="$(ssh_host_part "${GHE_HOSTNAME}")"
port="$(ssh_port_part "${GHE_HOSTNAME}")"

# Nothing to do when the snapshot carries no minio data — early exit.
if [ ! -d "${GHE_RESTORE_SNAPSHOT_PATH}/minio" ]; then
  log_warn "Warning: minio backup missing. Skipping ..."
  exit 0
fi

# Host-check; also establishes the GHE_REMOTE_XXX variables.
ghe_remote_version_required "${host}"

# Prepare the remote data directory, then transfer all minio data from
# the snapshot with rsync.
ghe_verbose "* Transferring minio files to ${host} ..."
ghe-ssh -p "${port}" "${host}" -- sudo mkdir -p "${GHE_REMOTE_DATA_USER_DIR}/minio"
ghe-ssh -p "${port}" "${host}" -- sudo chown -R minio:minio "${GHE_REMOTE_DATA_USER_DIR}/minio"

log_rsync "BEGIN: minio rsync" 1>&3
ghe-rsync \
  --verbose \
  --archive \
  --hard-links \
  --relative \
  --delete \
  --rsh="ghe-ssh -p ${port}" \
  --rsync-path='sudo -u minio rsync' \
  "${GHE_RESTORE_SNAPSHOT_PATH}/minio/./" \
  "${host}:${GHE_REMOTE_DATA_USER_DIR}/minio/" 1>&3
log_rsync "END: minio rsync" 1>&3

bm_end "$(basename "${0}")"

Просмотреть файл

@ -1,91 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-mssql <host>
#/ Restore MSSQL backup to a GitHub Actions service instance.
#/
#/ Note: This script typically isn't called directly. It's invoked by the ghe-restore command.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Show usage and bail with no arguments
[ -z "$*" ] && print_usage

# Check if the import tool is available in this version.
# Under GHE_TEST_REMOTE_VERSION we can't probe the real path, so use `type`.
import_tool_available() {
  if [ -z "$GHE_TEST_REMOTE_VERSION" ]; then
    ghe_ssh_mssql "test -e /usr/local/bin/ghe-import-mssql"
  else
    ghe_ssh_mssql "type ghe-import-mssql"
  fi
}

# Run a command on the MSSQL primary (falls back to the GHES host, see below).
ghe_ssh_mssql() {
  ghe-ssh $opts $ssh_config_file_opt "$GHE_MSSQL_PRIMARY_HOST" "$@"
}

# Remove the local scratch directory on exit or interruption.
cleanup() {
  rm -rf $tempdir
}
# Fix: also trap TERM so cleanup runs on termination, matching the
# INT/TERM/EXIT convention used by the sibling restore scripts.
trap 'cleanup' EXIT INT TERM

# Grab host arg
GHE_HOSTNAME="$1"

# Use the mssql primary host if the GHES cluster configuration contains a
# mssql-master, or the GHE server itself when mssql-master is not available.
GHE_MSSQL_PRIMARY_NODE="$(ghe-ssh "$GHE_HOSTNAME" -- "ghe-config cluster.mssql-master" || true)"
GHE_MSSQL_PRIMARY_HOST="$(ghe-ssh "$GHE_HOSTNAME" -- "ghe-config cluster.$GHE_MSSQL_PRIMARY_NODE.hostname" || true)"
if [ -z "$GHE_MSSQL_PRIMARY_HOST" ]; then
  GHE_MSSQL_PRIMARY_HOST="$GHE_HOSTNAME"
fi

tempdir=$(mktemp -d -t backup-utils-backup-XXXXXX)
ssh_config_file_opt=
opts=
isHA="$(ghe-ssh "$GHE_HOSTNAME" -- "ghe-config cluster.ha" || true)"

# Under cluster or HA configurations, generate an ssh_config so we can
# reach the MSSQL node through the front end.
if [ "$GHE_BACKUP_STRATEGY" = "cluster" ] || [ "$isHA" = "true" ] ; then
  ssh_config_file="$tempdir/ssh_config"
  ssh_config_file_opt="-F $ssh_config_file"
  opts="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no"
  ghe-ssh-config "$GHE_HOSTNAME" "$GHE_MSSQL_PRIMARY_HOST" > "$ssh_config_file"
fi

if ! import_tool_available; then
  ghe_verbose "ghe-import-mssql is not available"
  exit 1
fi

# Perform a host-check and establish the remote version in GHE_REMOTE_VERSION.
ghe_remote_version_required "$GHE_HOSTNAME"

# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}

# The directory holding the snapshot to restore
snapshot_dir_mssql="$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/mssql"

# Transfer backup files from the backup host to the appliance, recreating
# the remote backups directory from scratch first.
appliance_dir="$GHE_REMOTE_DATA_USER_DIR/mssql/backups"
echo "set -o pipefail; sudo rm -rf $appliance_dir; sudo mkdir -p $appliance_dir" | ghe_ssh_mssql /bin/bash
for b in "$snapshot_dir_mssql"/*
do
  # With no matching files the glob stays literal; skip the loop entirely.
  [[ -e "$b" ]] || break
  filename="${b##*/}"
  ghe_verbose "Transferring $filename to appliance host"
  # Stream the file over stdin (redirection instead of a useless `cat |`).
  ghe_ssh_mssql "sudo tee -a $appliance_dir/$filename >/dev/null 2>&1" < "$snapshot_dir_mssql/$filename"
done

# Change owner to mssql:mssql to ready for restore
ghe_ssh_mssql "sudo chown -R mssql:mssql $appliance_dir"

# Invoke restore command
bm_start "$(basename $0)"
ghe_ssh_mssql -- "ghe-import-mssql" < "/dev/null" 1>&3
bm_end "$(basename $0)"

Просмотреть файл

@ -1,82 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-mysql <host>
#/ Restore MySQL backup to a GitHub instance.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command when the rsync strategy is used.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Show usage and bail with no arguments
[ -z "$*" ] && print_usage
bm_start "$(basename $0)"
# Grab host arg
GHE_HOSTNAME="$1"
# Perform a host-check and establish the remote version in GHE_REMOTE_VERSION.
ghe_remote_version_required "$GHE_HOSTNAME"
# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}
export GHE_RESTORE_SNAPSHOT
# The directory holding the snapshot to restore
GHE_RESTORE_SNAPSHOT_PATH="$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT"
# This script is a dispatcher: it decides which concrete restore
# implementation applies (binary, logical, legacy, or an operator-supplied
# external-database script) and delegates to it.
#
# External database case: run the operator-provided restore script if set,
# otherwise reject snapshot/configuration combinations that can't be restored.
if is_external_database_snapshot; then
if [ -n "$EXTERNAL_DATABASE_RESTORE_SCRIPT" ]; then
$EXTERNAL_DATABASE_RESTORE_SCRIPT
# ensure that haproxy and mysql are ready to accept connections before continuing
if ! ghe-ssh "$GHE_HOSTNAME" -- "/usr/local/share/enterprise/ghe-service-wait-mysql"; then
error_message "Failed to connect to MySQL service!"
exit 2
fi
bm_end "$(basename $0)"
exit 0
else
if is_binary_backup "$GHE_RESTORE_SNAPSHOT_PATH"; then
log_error "Error: Restore of a binary backup to appliance with an external database configured is not supported. Please provide a custom external database restore script with EXTERNAL_DATABASE_RESTORE_SCRIPT"
exit 1
fi
if ! is_default_external_database_snapshot; then
log_error "Error: Backup was not taken with a GitHub provided backup strategy. You must provide a custom restore script for this backup using EXTERNAL_DATABASE_BACKUP_SCRIPT"
exit 1
fi
if is_binary_backup_feature_on; then
log_warn "Warning: Binary backups are configured on the target environment. \nBinary backup is not supported with an external MySQL database. \n\nPlease disable binary backups with 'ghe-config mysql.backup.binary false'"
fi
fi
fi
# Internal database case: choose the implementation matching both the
# snapshot contents and the appliance's mysql.backup.binary setting.
if is_binary_backup_feature_on; then
# Feature "mysql.backup.binary" is on, which means new backup scripts are available
if is_binary_backup "$GHE_RESTORE_SNAPSHOT_PATH"; then
ghe-restore-mysql-binary $GHE_HOSTNAME
else
ghe-restore-mysql-logical $GHE_HOSTNAME
fi
else
# We do not allow to restore binary backup without "mysql.backup.binary" set
if is_binary_backup "$GHE_RESTORE_SNAPSHOT_PATH"; then
log_error "To restore from a binary backup, you have to set ghe-config \"mysql.backup.binary\" to true" >&2
exit 2
else
if is_default_external_database_snapshot; then
ghe-restore-mysql-logical $GHE_HOSTNAME
else
# legacy mode
ghe-restore-mysql-legacy $GHE_HOSTNAME
fi
fi
fi
bm_end "$(basename $0)"

Просмотреть файл

@ -1,125 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-mysql-binary <host>
#/ Restore binary MySQL backup to a GitHub instance.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command when the rsync strategy is used.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Show usage and bail with no arguments
[ -z "$*" ] && print_usage
bm_start "$(basename $0)"
# Grab host arg
GHE_HOSTNAME="$1"
# Report whether incremental restore mode is in effect (informational only).
if [ "$GHE_INCREMENTAL" ]; then
echo "Incremental backup is configured."
else
echo "I don't see that incremental backup is configured. $GHE_INCREMENTAL"
fi
# Perform a host-check and establish the remote version in GHE_REMOTE_VERSION.
ghe_remote_version_required "$GHE_HOSTNAME"
# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}
export GHE_RESTORE_SNAPSHOT
# The directory holding the snapshot to restore
snapshot_dir="$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT"
# In cluster mode the import must run on the MySQL master node, reached
# through an ssh_config generated against the cluster front end.
if $CLUSTER ; then
ghe_mysql_master=$(ghe-ssh "$GHE_HOSTNAME" ghe-config "cluster.mysql-master")
if [ -z $ghe_mysql_master ]; then
echo "Something is wrong with configuration: cluster.mysql-master not found" >&2
exit 2
else
tempdir=$(mktemp -d -t backup-utils-restore-XXXXXX)
ssh_config_file="$tempdir/ssh_config"
ssh_config_file_opt="-F $ssh_config_file"
ghe-ssh-config "$GHE_HOSTNAME" "$ghe_mysql_master" > "$ssh_config_file"
port=$(ssh_port_part "$GHE_HOSTNAME")
ghe_mysql_master=$ghe_mysql_master${port:+:$port}
fi
else
ghe_mysql_master=$GHE_HOSTNAME
fi
# Check if the decompress needed by looking into the sentinel file
# In 2.19.5 we compress the binary backup twice
if [ "$(cat $snapshot_dir/mysql-binary-backup-sentinel)" = "NO_ADDITIONAL_COMPRESSION" ]; then
IMPORT_MYSQL=ghe-import-mysql-xtrabackup
GHE_RESTORE_HOST=$ghe_mysql_master
else
IMPORT_MYSQL="unpigz | ghe-import-mysql-xtrabackup"
GHE_RESTORE_HOST=$ghe_mysql_master
fi
# Remove remote scratch data (uploaded dumps, incremental file list) on
# exit or interruption.
cleanup() {
ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- "sudo rm -rf $GHE_REMOTE_DATA_USER_DIR/tmp/*"
ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- "sudo rm -rf /tmp/incremental-backup-files.txt"
}
trap 'cleanup' INT TERM EXIT
log_info "Creating temporary directory on remote host at $GHE_REMOTE_DATA_USER_DIR/tmp ..." 1>&3
ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- "sudo mkdir -p '$GHE_REMOTE_DATA_USER_DIR/tmp'" 1>&3
# If incremental restore is enabled, we need to upload the incremental backup file to the remote host
# We get a list of all the incremental backup files up to the snapshot we want to restore
# If the snapshot happens to be a full backup, we don't need to upload any incremental backup files
# Otherwise we follow this procedure:
# - for each incremental backup, create a directory in the format:
# $GHE_REMOTE_DATA_USER_DIR/tmp/incremental-restore-snapshot-dir/mysql.sql.gz
# - upload the incremental backup file to the directory
# Defaults: treat the snapshot as a plain full backup unless incremental
# mode is enabled AND the feature is on.
is_full=true
is_inc=false
if [ "$GHE_INCREMENTAL" ]; then
is_full=$(is_full_backup "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT")
is_inc=$(is_incremental_backup_feature_on)
fi
# Incremental path: upload every incremental backup up to the target
# snapshot plus the full base backup, hand over the list of incremental
# files, then run the import. Non-incremental path: upload and import a
# single dump.
if [ "$is_inc" = true ] && [ "$is_full" = false ]; then
  log_info "Uploading incremental backup directories to the remote host ..." 1>&3
  full_backup_dir=$(get_full_backup)
  log_info "Full backup directory: $full_backup_dir" 1>&3
  # Recreate the incremental-backup-files.txt file from scratch
  if [ -f "/tmp/incremental-backup-files.txt" ]; then
    rm "/tmp/incremental-backup-files.txt"
  fi
  touch "/tmp/incremental-backup-files.txt"
  for incremental_backup in $(get_incremental_backups "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT"); do
    echo "$incremental_backup" >> "/tmp/incremental-backup-files.txt"
    log_info "Incremental files to upload: $incremental_backup" 1>&3
    log_info "Creating directory $GHE_REMOTE_DATA_USER_DIR/tmp/$incremental_backup on remote host..." 1>&3
    ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- " sudo mkdir -p '$GHE_REMOTE_DATA_USER_DIR/tmp/$incremental_backup'"
    log_info "Uploading incremental backup file $GHE_DATA_DIR/$incremental_backup to the remote host ..." 1>&3
    cat "$GHE_DATA_DIR/$incremental_backup/mysql.sql.gz" | ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- " sudo dd of=$GHE_REMOTE_DATA_USER_DIR/tmp/$incremental_backup/mysql.sql.gz >/dev/null 2>&1"
  done
  # Transfer the full backup to the remote host
  log_info "Uploading full backup file $GHE_DATA_DIR/$full_backup_dir/mysql.sql.gz to the remote host ..." 1>&3
  cat $GHE_DATA_DIR/$full_backup_dir/mysql.sql.gz | ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- "sudo dd of=$GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz >/dev/null 2>&1"
  # Pass the list of incremental backup files
  ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- " sudo dd of=/tmp/incremental-backup-files.txt >/dev/null 2>&1" < "/tmp/incremental-backup-files.txt"
  # Restore the full backup and the incremental backup files.
  # Fix: this log used to name tmp/full/mysql.sql.gz, but the full backup is
  # uploaded to tmp/mysql.sql.gz (see the dd above); log the real path.
  log_info "Restoring full backup from $GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz ..." 1>&3
  echo "cat $GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz | $IMPORT_MYSQL" | ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- /bin/bash 1>&3
else
  log_info "Uploading $GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/mysql.sql.gz MySQL data to the remote host $GHE_RESTORE_HOST in $GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz ..." 1>&3
  cat $snapshot_dir/mysql.sql.gz | ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- "sudo dd of=$GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz >/dev/null 2>&1"
  log_info "Restore MySQL database ..."
  # Import the database
  echo "cat $GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz | $IMPORT_MYSQL" | ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- /bin/bash 1>&3
fi
bm_end "$(basename $0)"

Просмотреть файл

@ -1,55 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-mysql-legacy <host>
#/ Restore logical MySQL backup to a GitHub instance using legacy import script.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command when the rsync strategy is used.
# Note: Remove this file after 2.22 releases.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Show usage and bail with no arguments
[ -z "$*" ] && print_usage
bm_start "$(basename $0)"
# Grab host arg
GHE_HOSTNAME="$1"
# Perform a host-check and establish the remote version in GHE_REMOTE_VERSION.
ghe_remote_version_required "$GHE_HOSTNAME"
# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}
export GHE_RESTORE_SNAPSHOT
# The directory holding the snapshot to restore
snapshot_dir="$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT"
ssh_config_file_opt=
# legacy mode: decompress on the appliance and pipe into ghe-import-mysql
IMPORT_MYSQL="unpigz | ghe-import-mysql"
GHE_RESTORE_HOST=$GHE_HOSTNAME
# Remove the uploaded dump from the appliance on exit or interruption.
cleanup() {
ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- "sudo rm -rf $GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz"
}
trap 'cleanup' INT TERM EXIT
ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- "sudo mkdir -p '$GHE_REMOTE_DATA_USER_DIR/tmp'" 1>&3
# Transfer MySQL data from the snapshot to the GitHub instance.
cat $snapshot_dir/mysql.sql.gz | ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- "sudo dd of=$GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz >/dev/null 2>&1"
log_info "Restore MySQL database ..."
# Import the database
echo "cat $GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz | $IMPORT_MYSQL" | ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- /bin/bash 1>&3
bm_end "$(basename $0)"

Просмотреть файл

@ -1,52 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-mysql-logical <host>
#/ Restore logical MySQL backup to a GitHub instance.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command when the rsync strategy is used.
set -e

# Load the backup configuration and shared helpers
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Bail out with usage when no host argument was given
[ -z "$*" ] && print_usage

bm_start "$(basename $0)"

# Target host argument
GHE_HOSTNAME="$1"

# Host-check; establishes the remote version in GHE_REMOTE_VERSION.
ghe_remote_version_required "$GHE_HOSTNAME"

# ghe-restore normally exports the snapshot to restore; default to
# "current" so this script can also be invoked directly.
: ${GHE_RESTORE_SNAPSHOT:=current}
export GHE_RESTORE_SNAPSHOT

# Directory holding the snapshot we restore from
snapshot_dir="$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT"

ssh_config_file_opt=

# legacy mode
IMPORT_MYSQL="unpigz | ghe-import-mysql-mysqldump"
GHE_RESTORE_HOST=$GHE_HOSTNAME

# Drop the uploaded dump from the appliance on exit or interruption.
cleanup() {
  ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- "sudo rm -rf $GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz"
}
trap 'cleanup' INT TERM EXIT

ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- "sudo mkdir -p '$GHE_REMOTE_DATA_USER_DIR/tmp'" 1>&3

# Stream the compressed dump from the snapshot to the GitHub instance.
ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- "sudo dd of=$GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz >/dev/null 2>&1" < $snapshot_dir/mysql.sql.gz

log_info "Restore MySQL database ..."

# Decompress and import the dump on the appliance.
echo "cat $GHE_REMOTE_DATA_USER_DIR/tmp/mysql.sql.gz | $IMPORT_MYSQL" | ghe-ssh $ssh_config_file_opt "$GHE_RESTORE_HOST" -- /bin/bash 1>&3

bm_end "$(basename $0)"

Просмотреть файл

@ -1,166 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-pages <host>
#/ Restore repositories from an rsync snapshot of all Git repository data.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command.
# NOTE(review): the usage text above says "repositories" but this script
# restores Pages data — looks like copy-paste; print_usage emits these
# lines at runtime, so confirm before changing the wording.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Show usage and bail with no arguments
[ -z "$*" ] && print_usage
bm_start "$(basename $0)"
# Grab host arg
GHE_HOSTNAME="$1"
# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}
# Find the pages to restore
pages_paths=$(cd $GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/ && find pages -mindepth 5 -maxdepth 5 | cut -d / -f2-)
# No need to restore anything, early exit
if [ -z "$pages_paths" ]; then
log_warn "Warning: Pages backup missing. Skipping ..."
exit 0
else
increment-progress-total-count 5
fi
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$GHE_HOSTNAME"
ghe_remote_parallel
# Split host:port into parts
port=$(ssh_port_part "$GHE_HOSTNAME")
host=$(ssh_host_part "$GHE_HOSTNAME")
# Add user / -l option
user="${host%@*}"
[ "$user" = "$host" ] && user="admin"
hostnames=$host
# Local and remote scratch locations used for route calculation below
tempdir=$(mktemp -d -t backup-utils-restore-XXXXXX)
remote_tempdir=$(ghe-ssh "$GHE_HOSTNAME" -- mktemp -d -t backup-utils-restore-XXXXXX)
opts="$GHE_EXTRA_SSH_OPTS"
ssh_config_file_opt=
tmp_list=$tempdir/tmp_list
remote_tmp_list=$remote_tempdir/remote_tmp_list
routes_list=$tempdir/routes_list
remote_routes_list=$remote_tempdir/remote_routes_list
# In cluster mode, target every pages-server node through a generated
# ssh_config instead of the single host.
if $CLUSTER; then
ssh_config_file="$tempdir/ssh_config"
ssh_config_file_opt="-F $ssh_config_file"
opts="$opts -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no"
hostnames=$(ghe-cluster-find-nodes "$GHE_HOSTNAME" "pages-server")
ghe-ssh-config "$GHE_HOSTNAME" "$hostnames" > "$ssh_config_file"
fi
# Remove local and remote scratch directories on exit.
cleanup() {
rm -rf $tempdir
ghe-ssh "$GHE_HOSTNAME" -- rm -rf $remote_tempdir
true
}
trap 'cleanup' EXIT
# Build a list of pages paths to send to the server to calculate
# the restore routes, something like:
#
# 5/d3/d9/44/10
# 0/02/e7/4f/27
# 4/c1/6a/53/31
# 3/34/17/3c/30
# 6/6e/a9/ab/29
# ...
#
# One pages path per line.
bm_start "$(basename $0) - Building pages list"
OLDIFS=$IFS; IFS=$'\n'
for path in $pages_paths; do
ghe_verbose "* Adding path $path to the list of pages to send"
echo $path
done > $tmp_list
IFS=$OLDIFS
bm_end "$(basename $0) - Building pages list"
# The server returns a list of routes:
#
# 5/d3/d9/44/10 pages-server-1 pages-server-2 pages-server-3
# 0/02/e7/4f/27 pages-server-1 pages-server-3 pages-server-4
# 4/c1/6a/53/31 pages-server-2 pages-server-3 pages-server-4
# 3/34/17/3c/30 pages-server-4 pages-server-2 pages-server-1
# 6/6e/a9/ab/29 pages-server-3 pages-server-2 pages-server-1
# ...
#
# One route per line.
#
# NOTE: The route generation is performed on the appliance as it is considerably
# more performant than performing over an SSH pipe.
#
bm_start "$(basename $0) - Transferring pages list"
cat $tmp_list | ghe-ssh "$GHE_HOSTNAME" -- sponge $remote_tmp_list
cat $tmp_list | ghe_debug
bm_end "$(basename $0) - Transferring pages list"
bm_start "$(basename $0) - Generating routes"
echo "cat $remote_tmp_list | github-env ./bin/dpages-cluster-restore-routes > $remote_routes_list" | ghe-ssh "$GHE_HOSTNAME" /bin/bash
ghe-ssh "$GHE_HOSTNAME" -- cat $remote_routes_list | ghe_debug
bm_end "$(basename $0) - Generating routes"
# Routes are gzip'd in transit to cut transfer size.
bm_start "$(basename $0) - Fetching routes"
ghe-ssh "$GHE_HOSTNAME" -- gzip -c $remote_routes_list | gzip -d > $routes_list
cat $routes_list | ghe_debug
bm_end "$(basename $0) - Fetching routes"
bm_start "$(basename $0) - Processing routes"
# One rsync file list per target server: a route line "path s1 s2 ..."
# appends "path" to tempdir/s1.rsync, tempdir/s2.rsync, etc.
cat $routes_list | awk -v tempdir="$tempdir" '{ for(i=2;i<=NF;i++){ print $1 > (tempdir"/"$i".rsync") }}'
ghe_debug "\n$(find "$tempdir" -maxdepth 1 -name '*.rsync')"
bm_end "$(basename $0) - Processing routes"
if [ -z "$(find "$tempdir" -maxdepth 1 -name '*.rsync')" ]; then
log_warn "Warning: no routes found, skipping pages restore ..."
exit 0
else
increment-progress-total-count 2
fi
bm_start "$(basename $0) - Restoring pages"
# Fix: BEGIN used to be logged inside the loop (once per server) while END
# was logged once after it, leaving unbalanced marker pairs in the log.
# Log BEGIN once so BEGIN/END bracket the whole transfer.
log_rsync "BEGIN: pages rsync" 1>&3
for file_list in $tempdir/*.rsync; do
  # In cluster mode each .rsync list is named after its target pages
  # server; otherwise everything goes to the single target host.
  if $CLUSTER; then
    server=$(basename $file_list .rsync)
  else
    server=$host
  fi
  ghe_verbose "* Transferring Pages to $server"
  ghe-rsync -avrHR --delete \
    -e "ssh -q $opts -p $port $ssh_config_file_opt -l $user" \
    --rsync-path="sudo -u git rsync" \
    --files-from=$file_list \
    "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/pages/./" \
    "$server:$GHE_REMOTE_DATA_USER_DIR/pages/" 1>&3
done
log_rsync "END: pages rsync" 1>&3
bm_end "$(basename $0) - Restoring pages"
# In cluster mode, tell the pages service about the restored routes so it
# can finalize replica placement. The route list is split into 1000-line
# chunks and processed in parallel on the appliance (heredoc runs remotely).
if $CLUSTER; then
bm_start "$(basename $0) - Finalizing routes"
ghe_verbose "Finalizing routes"
ghe-ssh "$GHE_HOSTNAME" -- /bin/bash >&3 <<EOF
split -l 1000 $remote_routes_list $remote_tempdir/chunk
chunks=\$(find $remote_tempdir/ -name chunk\*)
$PARALLEL_CMD -i /bin/sh -c "cat {} | github-env ./bin/dpages-cluster-restore-finalize" -- \$chunks
EOF
increment-progress-total-count 1
bm_end "$(basename $0) - Finalizing routes"
fi
bm_end "$(basename $0)"

Просмотреть файл

@ -1,26 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-redis <host>
#/ Restore redis files from an rsync snapshot.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command.
set -e

# Load the backup configuration and shared helpers
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$(dirname "${BASH_SOURCE[0]}")/ghe-backup-config"

# Bail out with usage when invoked without arguments
[[ -z ${*} ]] && print_usage

# Positional args: target host and the snapshot directory to restore from
GHE_HOSTNAME="${1}"
GHE_RESTORE_SNAPSHOT_PATH="${2}"

bm_start "$(basename "${0}")"

# Stream the snapshot's redis.rdb straight into ghe-import-redis on the
# appliance; stdout is redirected to fd 3 as in the sibling restore scripts.
ghe-ssh "$GHE_HOSTNAME" -- 'ghe-import-redis' < "$GHE_RESTORE_SNAPSHOT_PATH/redis.rdb" 1>&3

bm_end "$(basename "${0}")"

Просмотреть файл

@ -1,248 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-repositories <host>
#/ Restore repositories from an rsync snapshot of all Git repository data.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Check to make sure moreutils parallel is installed and working properly
ghe_parallel_check
# Show usage and bail with no arguments
[ -z "$*" ] && print_usage
bm_start "$(basename $0)"
# Grab host arg
GHE_HOSTNAME="$1"
# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}
# Network directories are the dirnames of the *.git repos they contain;
# "grep nw" keeps only repository-network paths.
network_paths=$(cd $GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/ && find repositories -mindepth 6 -maxdepth 7 -name \*.git -exec dirname {} \; | uniq | grep nw | cut -d / -f2-)
if [ -z "$network_paths" ]; then
log_warn "Warning: Repositories backup missing. Skipping ..."
exit 0
else
increment-progress-total-count 5
fi
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$GHE_HOSTNAME"
ghe_remote_parallel
# Generate SSH config for forwarding
# Split host:port into parts
port=$(ssh_port_part "$GHE_HOSTNAME")
host=$(ssh_host_part "$GHE_HOSTNAME")
# Add user / -l option
user="${host%@*}"
[ "$user" = "$host" ] && user="admin"
hostnames=$host
# Local and remote scratch locations used throughout
tempdir=$(mktemp -d -t backup-utils-restore-XXXXXX)
remote_tempdir=$(ghe-ssh "$GHE_HOSTNAME" -- mktemp -d -t backup-utils-restore-XXXXXX)
ssh_config_file_opt=
opts="$GHE_EXTRA_SSH_OPTS"
tmp_list=$tempdir/tmp_list
remote_tmp_list=$remote_tempdir/remote_tmp_list
to_restore=$tempdir/to_restore
remote_to_restore=$remote_tempdir/remote_to_restore
routes_list=$tempdir/routes_list
remote_routes_list=$remote_tempdir/remote_routes_list
remote_warnings=$remote_tempdir/repo_warnings
# In cluster mode, restore to every git-server node via a generated ssh_config.
if $CLUSTER; then
ssh_config_file="$tempdir/ssh_config"
ssh_config_file_opt="-F $ssh_config_file"
opts="$opts -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no"
hostnames=$(ghe-cluster-find-nodes "$GHE_HOSTNAME" "git-server")
ghe-ssh-config "$GHE_HOSTNAME" "$hostnames" > "$ssh_config_file"
fi
# Re-enable GC on every node and drop scratch dirs, whatever happens.
cleanup() {
for hostname in $hostnames; do
ghe-gc-enable $ssh_config_file_opt $hostname:$port || true
done
ghe-ssh "$GHE_HOSTNAME" -- rm -rf $remote_tempdir
rm -rf $tempdir
}
trap cleanup EXIT
# Disable remote GC operations
for hostname in $hostnames; do
ghe-gc-disable $ssh_config_file_opt $hostname:$port
done
# Build a list of network paths to send to the server to calculate
# the restore routes, something like:
#
# a/nw/a8/3f/02/100000855
# a/nw/a8/bc/8d/100000880
# a/nw/a5/06/81/100000659
# a/nw/a5/84/6f/100000708
# a/nw/a5/e0/01/146
# ...
#
# One network path per line.
bm_start "$(basename $0) - Building network list"
OLDIFS=$IFS; IFS=$'\n'
for path in $network_paths; do
# Get the network ID
# The network id from a repository is the last component of the path
# i.e. /data/repositories/a/nw/a5/bf/c9/37 network ID would be 37
ghe_verbose "* Adding network_path $path to the list of networks to send"
echo $path
done > $tmp_list
IFS=$OLDIFS
bm_end "$(basename $0) - Building network list"
# In cluster environments, we need to ensure that all repository networks are replicated back to the
# same Spokes nodes that they were present on when the backup was taken. For this, the list of
# routes of each repository network is first obtained. Afterward, an rsync file list is created for
# each Spokes node including only those repository networks for which there was a route to the
# respective Spokes node.
if $CLUSTER; then
log_info "* Restoring repository networks to cluster nodes according to Spokes routes" 1>&3
# The server returns a list of routes:
#
# a/nw/a8/3f/02/100000855 dgit-node1 dgit-node2 dgit-node3
# a/nw/a8/bc/8d/100000880 dgit-node1 dgit-node2 dgit-node4
# a/nw/a5/06/81/100000659 dgit-node3 dgit-node2 dgit-node4
# ...
#
# One route per line.
#
# NOTE: The route generation is performed on the appliance as it is considerably
# more performant than performing over an SSH pipe.
#
bm_start "$(basename $0) - Transferring network list"
cat $tmp_list | ghe-ssh "$GHE_HOSTNAME" -- sponge $remote_tmp_list
cat $tmp_list | ghe_debug
bm_end "$(basename $0) - Transferring network list"
bm_start "$(basename $0) - Generating routes"
# Prefer the enterprise-provided route script when present on the appliance.
restore_routes_script="github-env ./bin/dgit-cluster-restore-routes"
if ghe-ssh "$GHE_HOSTNAME" test -e /usr/local/share/enterprise/ghe-restore-network-routes; then
restore_routes_script="/usr/local/share/enterprise/ghe-restore-network-routes"
fi
echo "cat $remote_tmp_list | $restore_routes_script | grep 'git-server-' > $remote_routes_list" | ghe-ssh "$GHE_HOSTNAME" -- /bin/bash
ghe-ssh "$GHE_HOSTNAME" -- cat $remote_routes_list | ghe_debug
bm_end "$(basename $0) - Generating routes"
bm_start "$(basename $0) - Fetching routes"
ghe-ssh "$GHE_HOSTNAME" -- gzip -c $remote_routes_list | gzip -d > $routes_list
cat $routes_list | ghe_debug
bm_end "$(basename $0) - Fetching routes"
bm_start "$(basename $0) - Processing routes"
# One rsync file list per node, plus a "to_restore" list of
# "<network-id> /data/repositories/<path> <nodes...>" used to finalize routes.
cat $routes_list | awk -v tempdir="$tempdir" '{ for(i=2;i<=NF;i++){ print $1 > (tempdir"/"$i".rsync") }}'
cat $routes_list | awk '{ n = split($1, p, "/"); printf p[n] " /data/repositories/" $1; $1=""; print $0}' > $to_restore
ghe_debug "\n$(find "$tempdir" -maxdepth 1 -name '*.rsync')"
bm_end "$(basename $0) - Processing routes"
# There is no need to collect routes and split them by Spokes server in noncluster setups because
# we need to transfer all repository networks to the primary instance unconditionally, regardless of
# the Spokes route list captured during the backup. As we already have the list of all repository
# network paths, we can simply use that as the rsync file list in noncluster environments.
else
log_info "* Restoring all repository networks to target host unconditionally" 1>&3
cp "$tmp_list" "$tempdir/git-server-primary.rsync"
fi
if [ -z "$(find "$tempdir" -maxdepth 1 -name '*.rsync')" ]; then
log_warn "Warning: no routes found, skipping repositories restore ..."
exit 0
else
increment-progress-total-count 3
fi
# rsync all the repository networks to the git server where they belong.
# One rsync invocation per server available.
# NOTE(review): the $(date ...) substitutions inside the double-quoted
# command strings below expand when the string is BUILT, not when it runs,
# so the RSYNC BEGIN/END timestamps record generation time — confirm intended.
bm_start "$(basename $0) - Restoring repository networks"
rsync_commands=()
for file_list in $tempdir/git-server-*.rsync; do
if $CLUSTER; then
server=$(basename $file_list .rsync)
else
server=$host
fi
rsync_commands+=("
if [ -n \"$GHE_VERBOSE\" ]; then
echo \"* Transferring repository networks to $server ($file_list) ...\" 1>&3
fi
echo \"$(date -u "+%FT%TZ") RSYNC BEGIN: repositories rsync\" 1>&3
ghe-rsync -avrR --delete \
-e \"ssh -q $opts -p $port $ssh_config_file_opt -l $user\" \
--rsync-path=\"sudo -u git rsync\" \
--files-from=$file_list \
\"$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/repositories/./\" \
\"$server:$GHE_REMOTE_DATA_USER_DIR/repositories/\" 1>&3
echo \"$(date -u "+%FT%TZ") RSYNC END: repositories rsync\" 1>&3
")
done
# Run the transfers in parallel when enabled, serially otherwise.
if [ "$GHE_PARALLEL_ENABLED" = "yes" ]; then
"$GHE_PARALLEL_COMMAND" "${GHE_PARALLEL_RSYNC_COMMAND_OPTIONS[@]}" -- "${rsync_commands[@]}"
else
for c in "${rsync_commands[@]}"; do
eval "$c"
done
fi
bm_end "$(basename $0) - Restoring repository networks"
# Tell dgit about the repositories restored
if $CLUSTER; then
bm_start "$(basename $0) - Finalizing routes"
ghe_verbose "Finalizing routes"
cat $to_restore | ghe-ssh "$GHE_HOSTNAME" -- sponge $remote_to_restore
ghe-ssh "$GHE_HOSTNAME" -- /bin/bash >&3 <<EOF
split -l 1000 $remote_to_restore $remote_tempdir/chunk
chunks=\$(find $remote_tempdir/ -name chunk\*)
$PARALLEL_CMD -i /bin/sh -c "cat {} | github-env ./bin/dgit-cluster-restore-finalize 2>>$remote_warnings" -- \$chunks
EOF
increment-progress-total-count 1
bm_end "$(basename $0) - Finalizing routes"
fi
bm_start "$(basename $0) - Updating repository info data"
# Sync /data/repositories/info from the snapshot to every node, or clear it
# on the nodes when the snapshot has none.
if [ -d $GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/repositories/info ]; then
ghe_verbose "* Transferring repository info data"
for hostname in $hostnames; do
if ! ghe-rsync -av --delete \
-e "ssh -q $opts -p $port $ssh_config_file_opt -l $user" \
--rsync-path="sudo -u git rsync" \
"$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/repositories/info/" \
"$hostname:$GHE_REMOTE_DATA_USER_DIR/repositories/info" 1>&3; then
echo "Error restoring /data/repositories/info to $hostname" 1>&2
fi
done
else
ghe_verbose "* Removing repository info data"
if $CLUSTER; then
ghe-ssh "$GHE_HOSTNAME" ghe-cluster-each -r git -- rm -f /data/repositories/info/*
else
ghe-ssh "$GHE_HOSTNAME" -- sudo -u git rm -f /data/repositories/info/*
fi
fi
bm_end "$(basename $0) - Updating repository info data"
# Surface any warnings collected during route finalization.
restore_warnings="$(ghe-ssh "$GHE_HOSTNAME" -- cat "$remote_warnings" 2>/dev/null || true)"
if [ -n "$restore_warnings" ]; then
log_warn "Warning: One or more repository networks failed to restore successfully. Please contact GitHub Enterprise Support for assistance."
echo "$restore_warnings"
fi
bm_end "$(basename $0)"

Просмотреть файл

@ -1,175 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-repositories-gist <host>
#/ Restore repositories from an rsync snapshot of all Git repository data.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Show usage and bail with no arguments
[ -z "$*" ] && print_usage
bm_start "$(basename $0)"
# Grab host arg
GHE_HOSTNAME="$1"
# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}
# Find the gists to restore
gist_paths=$(cd $GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/ && find repositories -mindepth 6 -maxdepth 7 -name \*.git | grep gist | cut -d / -f2-)
# No need to restore anything, early exit
if [ -z "$gist_paths" ]; then
log_warn "Warning: Gist backup missing. Skipping ..."
exit 0
else
increment-progress-total-count 5
fi
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$GHE_HOSTNAME"
ghe_remote_parallel
# Generate SSH config for forwarding
# Split host:port into parts
port=$(ssh_port_part "$GHE_HOSTNAME")
host=$(ssh_host_part "$GHE_HOSTNAME")
# Add user / -l option
user="${host%@*}"
[ "$user" = "$host" ] && user="admin"
hostnames=$host
tempdir=$(mktemp -d -t backup-utils-restore-XXXXXX)
remote_tempdir=$(ghe-ssh "$GHE_HOSTNAME" -- mktemp -d -t backup-utils-restore-XXXXXX)
ssh_config_file_opt=
opts="$GHE_EXTRA_SSH_OPTS"
tmp_list=$tempdir/tmp_list
remote_tmp_list=$remote_tempdir/remote_tmp_list
to_restore=$tempdir/to_restore
remote_to_restore=$remote_tempdir/remote_to_restore
routes_list=$tempdir/routes_list
remote_routes_list=$remote_tempdir/remote_routes_list
remote_warnings=$remote_tempdir/gist_warnings
if $CLUSTER; then
ssh_config_file="$tempdir/ssh_config"
ssh_config_file_opt="-F $ssh_config_file"
opts="$opts -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no"
hostnames=$(ghe-cluster-find-nodes "$GHE_HOSTNAME" "git-server")
ghe-ssh-config "$GHE_HOSTNAME" "$hostnames" > "$ssh_config_file"
fi
# Remove local and remote scratch directories on exit.
# Fix: the remote cleanup must remove $remote_tempdir (the directory created
# via remote mktemp above); it previously removed the *local* $tempdir path
# name on the appliance, which leaked $remote_tempdir there. Matches the
# cleanup pattern used by ghe-restore-storage.
cleanup() {
ghe-ssh "$GHE_HOSTNAME" -- rm -rf $remote_tempdir
rm -rf $tempdir
}
trap cleanup EXIT
# Build a list of gist paths to send to the server to calculate
# the restore routes, something like:
#
# a/a8/3f/02/gist
# a/a8/bc/8d/gist
# a/a5/06/81/gist
# a/a5/84/6f/gist
# a/a5/e0/01/gist
# ...
#
# One network path per line.
bm_start "$(basename $0) - Building gist list"
OLDIFS=$IFS; IFS=$'\n'
for path in $gist_paths; do
ghe_verbose "* Adding gist $path to the list of networks to send"
echo $path
done > $tmp_list
IFS=$OLDIFS
bm_end "$(basename $0) - Building gist list"
# The server returns a list of routes:
#
# a/a8/3f/02/gist/gist_id.git dgit-node3 dgit-node2 dgit-node4
# a/a8/bc/8d/gist/gist_id.git dgit-node3 dgit-node2 dgit-node4
# a/a5/06/81/gist/gist_id.git dgit-node3 dgit-node2 dgit-node4
# a/a5/84/6f/gist/gist_id.git dgit-node1 dgit-node2 dgit-node4
# a/a5/e0/01/gist/gist_id.git dgit-node1 dgit-node2 dgit-node3
# ...
#
# One route per line.
#
# NOTE: The route generation is performed on the appliance as it is considerably
# more performant than performing over an SSH pipe.
#
bm_start "$(basename $0) - Transferring gist list"
cat $tmp_list | ghe-ssh "$GHE_HOSTNAME" -- sponge $remote_tmp_list
cat $tmp_list | ghe_debug
bm_end "$(basename $0) - Transferring gist list"
bm_start "$(basename $0) - Generating routes"
echo "cat $remote_tmp_list | github-env ./bin/gist-cluster-restore-routes > $remote_routes_list" | ghe-ssh "$GHE_HOSTNAME" -- /bin/bash
ghe-ssh "$GHE_HOSTNAME" -- cat $remote_routes_list | ghe_debug
bm_end "$(basename $0) - Generating routes"
# Compress the route list in transit; it can be large.
bm_start "$(basename $0) - Transferring routes"
ghe-ssh "$GHE_HOSTNAME" -- gzip -c $remote_routes_list | gzip -d > $routes_list
cat $routes_list | ghe_debug
bm_end "$(basename $0) - Transferring routes"
# Split the routes into one .rsync file list per destination server, and
# build the "<gist_id> /data/repositories/<path> <servers...>" finalize list.
bm_start "$(basename $0) - Processing routes"
cat $routes_list | awk -v tempdir="$tempdir" '{ for(i=2;i<=NF;i++){ print $1 > (tempdir"/"$i".rsync") }}'
cat $routes_list | awk '{ n = split($1, p, "/"); i = p[n]; sub(/\.git/, "", i); printf i " /data/repositories/" $1; $1=""; print $0}' > $to_restore
ghe_debug "\n$(find "$tempdir" -maxdepth 1 -name '*.rsync')"
bm_end "$(basename $0) - Processing routes"
if [ -z "$(find "$tempdir" -maxdepth 1 -name '*.rsync')" ]; then
log_warn "Warning: no routes found, skipping gists restore ..."
exit 0
else
increment-progress-total-count 2
fi
# rsync all the gist repositories
bm_start "$(basename $0) - Restoring gists"
for file_list in $tempdir/*.rsync; do
if $CLUSTER; then
server=$(basename $file_list .rsync)
else
server=$host
fi
ghe_verbose "* Transferring gists to $server"
ghe-rsync -avrR --delete \
-e "ssh -q $opts -p $port $ssh_config_file_opt -l $user" \
--rsync-path="sudo -u git rsync" \
--files-from=$file_list \
"$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/repositories/./" \
"$server:$GHE_REMOTE_DATA_USER_DIR/repositories/" 1>&3
done
bm_end "$(basename $0) - Restoring gists"
# In cluster mode, finalize the restored gists on the appliance in
# 1000-line chunks; failures accumulate in $remote_warnings.
if $CLUSTER; then
bm_start "$(basename $0) - Finalizing routes"
ghe_verbose "Finalizing routes"
cat $to_restore | ghe-ssh "$GHE_HOSTNAME" -- sponge $remote_to_restore
ghe-ssh "$GHE_HOSTNAME" -- /bin/bash >&3 <<EOF
split -l 1000 $remote_to_restore $remote_tempdir/chunk
chunks=\$(find $remote_tempdir/ -name chunk\*)
$PARALLEL_CMD -i /bin/sh -c "cat {} | github-env ./bin/gist-cluster-restore-finalize 2>>$remote_warnings" -- \$chunks
EOF
increment-progress-total-count 1
bm_end "$(basename $0) - Finalizing routes"
fi
# Surface any warnings recorded remotely (best effort; file may not exist).
restore_warnings="$(ghe-ssh "$GHE_HOSTNAME" -- cat "$remote_warnings" 2>/dev/null || true)"
if [ -n "$restore_warnings" ]; then
log_warn "Warning: One or more Gists failed to restore successfully. Please contact GitHub Enterprise Support for assistance."
echo "$restore_warnings"
fi
bm_end "$(basename $0)"

Просмотреть файл

@ -1,146 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-secrets <host>
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Show usage and bail with no arguments
[ -z "$*" ] && print_usage
bm_start "$(basename $0)"
# Grab host arg
GHE_HOSTNAME="$1"
# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}
# Path to snapshot dir we're restoring from
export GHE_RESTORE_SNAPSHOT_PATH="$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT"
host=$(ssh_host_part "$GHE_HOSTNAME")
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$host"
# Otherwise use legacy
# Function to restore a secret setting stored in a file.
# restore-secret <description> <file-name> <setting-name>
#   $1 - human-readable description, used only for the "Restoring ..." log line
#   $2 - file name under $GHE_RESTORE_SNAPSHOT_PATH holding the secret value
#   $3 - ghe-config setting name written on the appliance over SSH
# Silently a no-op when the snapshot file does not exist, so every secret
# below can be attempted unconditionally.
restore-secret() {
if [ -f "$GHE_RESTORE_SNAPSHOT_PATH/$2" ]; then
echo "Restoring $1 ..."
echo "ghe-config '$3' '$(cat "$GHE_RESTORE_SNAPSHOT_PATH/$2")'" |
ghe-ssh "$GHE_HOSTNAME" -- /bin/bash
fi
}
log_info "Restoring secrets and applying cleanup ..." 1>&3
restore-secret "management console password" "manage-password" "secrets.manage"
restore-secret "password pepper" "password-pepper" "secrets.github.user-password-secrets"
restore-secret "kredz.credz HMAC key" "kredz-credz-hmac" "secrets.kredz.credz-hmac-secret"
restore-secret "kredz.varz HMAC key" "kredz-varz-hmac" "secrets.kredz.varz-hmac-secret"
restore-secret "management console argon2 secret" "manage-argon-secret" "secrets.manage-auth.argon-secret"
restore-secret "external MySQL password" "external-mysql-password" "secrets.external.mysql"
restore-secret "Chat Integration MSTeams app id" "chatops-msteams-app-id" "secrets.chatops.msteams.app-id"
restore-secret "Chat Integration MSTeams app password" "chatops-msteams-app-password" "secrets.chatops.msteams.app-password"
restore-secret "Chat Integration MSTeams public endpoint" "chatops-msteams-app-public-endpoint" "secrets.chatops.msteams.public-endpoint"
restore-secret "Chat Integration MSTeams bot handle" "chatops-msteams-bot-handle" "secrets.chatops.msteams.bot-handle"
restore-secret "Chat Integration MSTeams bot name" "chatops-msteams-bot-name" "secrets.chatops.msteams.bot-name"
restore-secret "Chat Integration Slack app id" "chatops-slack-app-id" "secrets.chatops.slack.app-id"
restore-secret "Chat Integration Slack client id" "chatops-slack-client-id" "secrets.chatops.slack.client-id"
restore-secret "Chat Integration Slack client secret" "chatops-slack-client-secret" "secrets.chatops.slack.client-secret"
restore-secret "Chat Integration Slack verification token" "chatops-slack-verification-token" "secrets.chatops.slack.verification-token"
restore-secret "Chat Integration Slack config token" "chatops-slack-config-token" "secrets.chatops.slack.config-token"
restore-secret "Chat Integration Slack public endpoint" "chatops-slack-public-endpoint" "secrets.chatops.slack.public-endpoint"
restore-secret "Chat Integration Slack signing secret" "chatops-slack-signing-secret" "secrets.chatops.slack.signing-secret"
restore-secret "Chat Integration Slack app level token" "chatops-slack-app-level-token" "secrets.chatops.slack.app-level-token"
restore-secret "Chat Integration Slack slash command" "chatops-slack-slash-command" "secrets.chatops.slack.slash-command"
restore-secret "Chat Integration Slack app name" "chatops-slack.app-name" "secrets.chatops.slack.app-name"
restore-secret "Chat Integration Slack socket mode" "chatops-slack.socket-mode" "secrets.chatops.slack.socket-mode"
restore-secret "Chat Integration public endpoint" "chatops-public-endpoint" "secrets.chatops.public-endpoint"
restore-secret "Chat Integration app type" "chatops-app-type" "secrets.chatops.app-type"
restore-secret "Chat Integration app id teams" "chatops-app-id-teams" "secrets.chatops.app-id-teams"
restore-secret "Chat Integration webhook secret teams" "chatops-webhook-secret-teams" "secrets.chatops.webhook-secret-teams"
restore-secret "Chat Integration client secret teams" "chatops-client-secret-teams" "secrets.chatops.client-secret-teams"
restore-secret "Chat Integration client id teams" "chatops-client-id-teams" "secrets.chatops.client-id-teams"
restore-secret "Chat Integration storage secret" "chatops-storage-secret" "secrets.chatops.storage-secret"
restore-secret "Chat Integration session secret" "chatops-session-secret" "secrets.chatops.session-secret"
restore-secret "Chat Integration app id slack" "chatops-app-id-slack" "secrets.chatops.app-id-slack"
restore-secret "Chat Integration webhook secret slack" "chatops-webhook-secret-slack" "secrets.chatops.webhook-secret-slack"
restore-secret "Chat Integration client secret slack" "chatops-client-secret-slack" "secrets.chatops.client-secret-slack"
restore-secret "Chat Integration client id slack" "chatops-client-id-slack" "secrets.chatops.client-id-slack"
restore-secret "Packages aws access key" "packages-aws-access-key" "secrets.packages.aws-access-key"
restore-secret "Packages aws secret key" "packages-aws-secret-key" "secrets.packages.aws-secret-key"
restore-secret "Packages s3 bucket" "packages-s3-bucket" "secrets.packages.s3-bucket"
restore-secret "Packages storage service url" "packages-service-url" "secrets.packages.service-url"
restore-secret "Packages blob storage type" "packages-blob-storage-type" "secrets.packages.blob-storage-type"
restore-secret "Packages azure connection string" "packages-azure-connection-string" "secrets.packages.azure-connection-string"
restore-secret "Packages azure container name" "packages-azure-container-name" "secrets.packages.azure-container-name"
# Restore storage container prefix, but only if it exists, and the `-c` option is used with ghe-restore to avoid staging instances using production bucket settings
if [[ $RESTORE_SETTINGS == "true" ]]; then
if [[ -e "$GHE_RESTORE_SNAPSHOT_PATH/actions-storage-container-prefix" ]]; then
restore-secret "Actions storage container prefix" "actions-storage-container-prefix" "secrets.actions.storage.container-prefix"
else
log_warn "Actions storage container prefix not present in backup. Skipping ..."
fi
fi
restore-secret "Actions configuration database login" "actions-config-db-login" "secrets.actions.ConfigurationDatabaseSqlLogin"
restore-secret "Actions configuration database password" "actions-config-db-password" "secrets.actions.ConfigurationDatabaseSqlPassword"
restore-secret "Actions framework access token key secret" "actions-framework-access-token" "secrets.actions.FrameworkAccessTokenKeySecret"
restore-secret "Actions Url signing HMAC key primary" "actions-url-signing-hmac-key-primary" "secrets.actions.UrlSigningHmacKeyPrimary"
restore-secret "Actions Url signing HMAC key secondary" "actions-url-signing-hmac-key-secondary" "secrets.actions.UrlSigningHmacKeySecondary"
restore-secret "Actions OAuth S2S signing cert" "actions-oauth-s2s-signing-cert" "secrets.actions.OAuthS2SSigningCert"
restore-secret "Actions OAuth S2S signing key" "actions-oauth-s2s-signing-key" "secrets.actions.OAuthS2SSigningKey"
restore-secret "Actions OAuth S2S signing cert thumbprint" "actions-oauth-s2s-signing-cert-thumbprint" "secrets.actions.OAuthS2SSigningCertThumbprint"
restore-secret "Actions primary encryption cert thumbprint" "actions-primary-encryption-cert-thumbprint" "secrets.actions.PrimaryEncryptionCertificateThumbprint"
restore-secret "Actions AAD cert thumbprint" "actions-aad-cert-thumbprint" "secrets.actions.AADCertThumbprint"
restore-secret "Actions delegated auth cert thumbprint" "actions-delegated-auth-cert-thumbprint" "secrets.actions.DelegatedAuthCertThumbprint"
restore-secret "Actions runtime service principal cert" "actions-runtime-service-principal-cert" "secrets.actions.RuntimeServicePrincipalCertificate"
restore-secret "Actions S2S encryption cert" "actions-s2s-encryption-cert" "secrets.actions.S2SEncryptionCertificate"
restore-secret "Actions secondary encryption cert thumbprint" "actions-secondary-encryption-cert-thumbprint" "secrets.actions.SecondaryEncryptionCertificateThumbprint"
restore-secret "Actions service principal cert" "actions-service-principal-cert" "secrets.actions.ServicePrincipalCertificate"
restore-secret "Actions SPS validation cert thumbprint" "actions-sps-validation-cert-thumbprint" "secrets.actions.SpsValidationCertThumbprint"
restore-secret "Actions Launch secrets encryption/decryption" "actions-launch-secrets-private-key" "secrets.launch.actions-secrets-private-key"
restore-secret "Actions Launch deployer HMAC key" "actions-launch-deployer-hmac" "secrets.launch.deployer-hmac-secret"
restore-secret "Actions Launch Client id" "actions-launch-client-id" "secrets.launch.client-id"
restore-secret "Actions Launch Client secret" "actions-launch-client-secret" "secrets.launch.client-secret"
restore-secret "Actions Launch receiver webhook secret" "actions-launch-receiver-webhook-secret" "secrets.launch.receiver-webhook-secret"
restore-secret "Actions Launch app private key" "actions-launch-app-private-key" "secrets.launch.app-private-key"
restore-secret "Actions Launch app public key" "actions-launch-app-public-key" "secrets.launch.app-public-key"
restore-secret "Actions Launch app id" "actions-launch-app-id" "secrets.launch.app-id"
restore-secret "Actions Launch app relay id" "actions-launch-app-relay-id" "secrets.launch.app-relay-id"
restore-secret "Actions Launch action runner secret" "actions-launch-action-runner-secret" "secrets.launch.action-runner-secret"
restore-secret "Actions Launch service cert" "actions-launch-azp-app-cert" "secrets.launch.azp-app-cert"
restore-secret "Actions Launch service private key" "actions-launch-app-app-private-key" "secrets.launch.azp-app-private-key"
restore-secret "Actions Launch token oauth key" "actions-oauth-s2s-signing-key" "secrets.launch.token-oauth-key"
restore-secret "Actions Launch token oauth cert" "actions-oauth-s2s-signing-cert" "secrets.launch.token-oauth-cert"
# Restore secret scanning
restore-secret "secret scanning encrypted secrets current storage key" "secret-scanning-encrypted-secrets-current-storage-key" "secrets.secret-scanning.encrypted-secrets-current-storage-key"
restore-secret "secret scanning encrypted secrets delimited storage keys" "secret-scanning-encrypted-secrets-delimited-storage-keys" "secrets.secret-scanning.encrypted-secrets-delimited-storage-keys"
restore-secret "secret scanning encrypted secrets current shared transit key" "secret-scanning-encrypted-secrets-current-shared-transit-key" "secrets.secret-scanning.encrypted-secrets-current-shared-transit-key"
restore-secret "secret scanning encrypted secrets delimited shared transit keys" "secret-scanning-encrypted-secrets-delimited-shared-transit-keys" "secrets.secret-scanning.encrypted-secrets-delimited-shared-transit-keys"
restore-secret "secret scanning user content delimited encryption root keys" "secret-scanning-user-content-delimited-encryption-root-keys" "secrets.secret-scanning.secret-scanning-user-content-delimited-encryption-root-keys"
# Restore encrypted column
restore-secret "encrypted column encryption keying material" "encrypted-column-encryption-keying-material" "secrets.github.encrypted-column-keying-material"
restore-secret "encrypted column current encryption key" "encrypted-column-current-encryption-key" "secrets.github.encrypted-column-current-encryption-key"
bm_end "$(basename $0)"
exit 0

Просмотреть файл

@ -1,52 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-settings <host>
#/ Restore settings from a snapshot to the given <host>.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Show usage and bail with no arguments
[ -z "$*" ] && print_usage
bm_start "$(basename $0)"
# Grab host arg
GHE_HOSTNAME="$1"
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$GHE_HOSTNAME"
# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}
# Path to snapshot dir we're restoring from
GHE_RESTORE_SNAPSHOT_PATH="$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT"
# Stream the snapshot's license file into the appliance importer.
log_info "Restoring license ..."
ghe-ssh "$GHE_HOSTNAME" -- 'ghe-import-license' < "$GHE_RESTORE_SNAPSHOT_PATH/enterprise.ghl" 1>&3
log_info "Restoring settings and applying configuration ..."
# work around issue importing settings with bad storage mode values
# (the trailing `echo` guarantees the JSON stream ends with a newline
# before it is piped through sed and into ghe-import-settings)
( cat "$GHE_RESTORE_SNAPSHOT_PATH/settings.json" && echo ) |
sed 's/"storage_mode": "device"/"storage_mode": "rootfs"/' |
ghe-ssh "$GHE_HOSTNAME" -- '/usr/bin/env GHEBUVER=2 ghe-import-settings' 1>&3
# Restore SAML keys if present.
# The tar is unpacked directly into the appliance's common data directory.
if [ -f "$GHE_RESTORE_SNAPSHOT_PATH/saml-keys.tar" ]; then
log_info "Restoring SAML keys ..."
cat "$GHE_RESTORE_SNAPSHOT_PATH/saml-keys.tar" |
ghe-ssh "$GHE_HOSTNAME" -- "sudo tar -C $GHE_REMOTE_DATA_USER_DIR/common/ -xf -"
fi
# Restore CA certificates if present.
# The tar stream is handed to the appliance-side importer as-is.
if [ -f "$GHE_RESTORE_SNAPSHOT_PATH/ssl-ca-certificates.tar" ]; then
log_info "Restoring CA certificates ..."
cat "$GHE_RESTORE_SNAPSHOT_PATH/ssl-ca-certificates.tar" |
ghe-ssh "$GHE_HOSTNAME" -- "ghe-import-ssl-ca-certificates"
fi
bm_end "$(basename $0)"

Просмотреть файл

@ -1,32 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-snapshot-path [snapshot]
#/
#/ Print the path to the given snapshot. Defaults to current if no argument given.
#/ Exits with non-0 if the snapshot doesn't exist in GHE_DATA_DIR
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Pick the snapshot name: the basename of the argument when one was given,
# otherwise fall back to "current".
case "${1:-}" in
"") GHE_RESTORE_SNAPSHOT="current" ;;
*) GHE_RESTORE_SNAPSHOT="$(basename "$1")" ;;
esac
# Resolve the snapshot id if we're restoring from current. This is mostly
# just for logging.
if [ "$GHE_RESTORE_SNAPSHOT" = "current" ]; then
GHE_RESTORE_SNAPSHOT=$(readlink "$GHE_DATA_DIR"/current || true)
fi
# Happy path: the snapshot name resolved and its directory exists.
if [ -n "$GHE_RESTORE_SNAPSHOT" ] && [ -d "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT" ]; then
echo "$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT"
exit 0
fi
# Bad snapshot: make sure the name is non-empty for the error message, then bail.
: "${GHE_RESTORE_SNAPSHOT:=current}"
log_error "Error: Snapshot '$GHE_RESTORE_SNAPSHOT' doesn't exist." 1>&2
exit 1

Просмотреть файл

@ -1,28 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-ssh-keys <host>
#/ Restore ssh keys from an rsync snapshot.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$(dirname "${BASH_SOURCE[0]}")/ghe-backup-config"
# Show usage and bail with no arguments
[[ -z ${*} ]] && print_usage
# Start the benchmark timer once. (A duplicate bm_start call before the
# ghe-ssh line was removed; there is only one matching bm_end.)
bm_start "$(basename "${0}")"
# Grab host arg
GHE_HOSTNAME="${1}"
# Grab snapshot path arg
GHE_RESTORE_SNAPSHOT_PATH="${2}"
# Stream the snapshot's authorized keys into the appliance importer.
ghe-ssh "$GHE_HOSTNAME" -- 'ghe-import-authorized-keys' < "$GHE_RESTORE_SNAPSHOT_PATH/authorized-keys.json" 1>&3
bm_end "$(basename "${0}")"

Просмотреть файл

@ -1,181 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-restore-storage <host>
#/
#/ Restore storage objects from an rsync snapshot.
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-restore command.
set -e
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Check to make sure moreutils parallel is installed and working properly
ghe_parallel_check
# Show usage and bail with no arguments
[ -z "$*" ] && print_usage
bm_start "$(basename $0)"
# Grab host arg
GHE_HOSTNAME="$1"
# The snapshot to restore should be set by the ghe-restore command but this lets
# us run this script directly.
: ${GHE_RESTORE_SNAPSHOT:=current}
# Find the objects to restore
# (wc -c emits one "<size> <path>" line per storage object file)
storage_paths=$(cd $GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/ && find storage -mindepth 4 -maxdepth 4 -type f -exec wc -c {} \;)
# No need to restore anything, early exit
if [ -z "$storage_paths" ]; then
log_warn "Warning: Storage backup missing. Skipping ..."
exit 0
else
increment-progress-total-count 5
fi
# Perform a host-check and establish GHE_REMOTE_XXX variables.
ghe_remote_version_required "$GHE_HOSTNAME"
ghe_remote_parallel
# Split host:port into parts
port=$(ssh_port_part "$GHE_HOSTNAME")
host=$(ssh_host_part "$GHE_HOSTNAME")
# Add user / -l option
user="${host%@*}"
[ "$user" = "$host" ] && user="admin"
hostnames=$host
tempdir=$(mktemp -d -t backup-utils-restore-XXXXXX)
remote_tempdir=$(ghe-ssh "$GHE_HOSTNAME" -- mktemp -d -t backup-utils-restore-XXXXXX)
ssh_config_file_opt=
opts="$GHE_EXTRA_SSH_OPTS"
tmp_list=$tempdir/tmp_list
remote_tmp_list=$remote_tempdir/remote_tmp_list
routes_list=$tempdir/routes_list
remote_routes_list=$remote_tempdir/remote_routes_list
# In cluster mode, target every storage-server node via a generated SSH config.
if $CLUSTER; then
ssh_config_file="$tempdir/ssh_config"
ssh_config_file_opt="-F $tempdir/ssh_config"
opts="$opts -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no"
hostnames=$(ghe-cluster-find-nodes "$GHE_HOSTNAME" "storage-server")
ghe-ssh-config "$GHE_HOSTNAME" "$hostnames" > "$ssh_config_file"
fi
# Remove local and remote scratch directories on exit.
cleanup() {
rm -rf $tempdir
ghe-ssh "$GHE_HOSTNAME" -- rm -rf $remote_tempdir
true
}
trap 'cleanup' EXIT
# Find the routes (servers) for each storage object available locally
# Sends a list of "<oid> <size>" tuples with the following format:
#
# # OID bytes
# b8a48b6b122b4ef8175348d1d6fbd846d3b3ccc8fd7552b79f91125c4958e43b 5592001
# b851fd1f147c644a9de778f19090ea785b415c69e2a2fba35a65144fa2753ab9 7340032
# b65f657194ca6202c17b5062e4afc11843fc892a3f2febef8ac10971db7689a8 5591634
# b63c30f6f885e59282c2aa22cfca846516b5e72621c10a58140fb04d133e2c17 5592492
# ...
bm_start "$(basename $0) - Building object list"
# wc output "size path" -> swap to "path size" -> strip directories, keeping
# just "oid size" (the oid is the path's basename).
echo "$storage_paths" | awk '{print $2 " " $1}' | awk -F/ '{print $NF }' > $tmp_list
bm_end "$(basename $0) - Building object list"
# The server returns the list of servers where the objects will be sent:
#
# # OID SERVER1 SERVER2 SERVER2
# b8a48b6b122b4ef8175348d1d6fbd846d3b3ccc8fd7552b79f91125c4958e43b server1 server2 server3
# bc4cdd292e6b5387df2a42a907fcd5f3b6804a5d5ab427184faea5ef118d635e server1 server2 server3
# ...
#
# One route per line.
#
# NOTE: The route generation is performed on the appliance as it is considerably
# more performant than performing over an SSH pipe.
#
bm_start "$(basename $0) - Transferring object list"
cat $tmp_list | ghe-ssh "$GHE_HOSTNAME" -- sponge $remote_tmp_list
cat $tmp_list | ghe_debug
bm_end "$(basename $0) - Transferring object list"
bm_start "$(basename $0) - Generating routes"
echo "cat $remote_tmp_list | github-env ./bin/storage-cluster-restore-routes > $remote_routes_list" | ghe-ssh "$GHE_HOSTNAME" /bin/bash
ghe-ssh "$GHE_HOSTNAME" -- cat $remote_routes_list | ghe_debug
bm_end "$(basename $0) - Generating routes"
# Compress the route list in transit; it can be large.
bm_start "$(basename $0) - Fetching routes"
ghe-ssh "$GHE_HOSTNAME" -- gzip -c $remote_routes_list | gzip -d > $routes_list
cat $routes_list | ghe_debug
bm_end "$(basename $0) - Fetching routes"
bm_start "$(basename $0) - Processing routes"
# One .rsync file list per destination server; each oid is expanded to its
# sharded path x/xx/yy/<oid> relative to the storage root.
cat $routes_list | awk -v tempdir="$tempdir" '{ for(i=2;i<=NF;i++){ print substr($1,1,1) "/" substr($1,1,2) "/" substr($1,3,2) "/" $1 > (tempdir"/"$i".rsync") }}'
ghe_debug "\n$(find "$tempdir" -maxdepth 1 -name '*.rsync')"
bm_end "$(basename $0) - Processing routes"
if [ -z "$(find "$tempdir" -maxdepth 1 -name '*.rsync')" ]; then
log_warn "Warning: no routes found, skipping storage restore ..."
exit 0
else
increment-progress-total-count 2
fi
# rsync all the objects to the storage server where they belong.
# One rsync invocation per server available.
bm_start "$(basename $0) - Restoring objects"
for file_list in $tempdir/*.rsync; do
if $CLUSTER; then
server=$(basename $file_list .rsync)
else
server=$host
fi
# Determine the remote owner of /data/user/storage; fall back to git if stat fails.
storage_user=$(ghe-ssh $ssh_config_file_opt $server:$port -- stat -c %U /data/user/storage || echo git)
rsync_commands+=("
if [ -n \"$GHE_VERBOSE\" ]; then
log_info \"* Transferring data to $server ...\" 1>&3
fi
echo \"$(date -u "+%FT%TZ") RSYNC BEGIN: storage rsync\" 1>&3
ghe-rsync -arvHR --delete \
-e \"ssh -q $opts -p $port $ssh_config_file_opt -l $user\" \
--rsync-path=\"sudo -u $storage_user rsync\" \
--files-from=$file_list \
--size-only \
\"$GHE_DATA_DIR/$GHE_RESTORE_SNAPSHOT/storage/./\" \
\"$server:$GHE_REMOTE_DATA_USER_DIR/storage/\" 1>&3
echo \"$(date -u "+%FT%TZ") RSYNC END: storage rsync\" 1>&3
")
done
# Run the accumulated per-server rsync command strings: through the parallel
# runner when enabled, otherwise sequentially via eval.
if [ "$GHE_PARALLEL_ENABLED" = "yes" ]; then
"$GHE_PARALLEL_COMMAND" "${GHE_PARALLEL_RSYNC_COMMAND_OPTIONS[@]}" -- "${rsync_commands[@]}"
else
for c in "${rsync_commands[@]}"; do
eval "$c"
done
fi
bm_end "$(basename $0) - Restoring objects"
# In cluster mode, finalize the restored objects on the appliance in
# 1000-line chunks processed in parallel.
if $CLUSTER; then
bm_start "$(basename $0) - Finalizing routes"
ghe_verbose "Finalizing routes"
ghe-ssh "$GHE_HOSTNAME" -- /bin/bash >&3 <<EOF
split -l 1000 $remote_routes_list $remote_tempdir/chunk
chunks=\$(find $remote_tempdir/ -name chunk\*)
$PARALLEL_CMD -i /bin/sh -c "cat {} | github-env ./bin/storage-cluster-restore-finalize" -- \$chunks
EOF
increment-progress-total-count 1
bm_end "$(basename $0) - Finalizing routes"
fi
bm_end "$(basename $0)"

Просмотреть файл

@ -1,72 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-rsync
#/ Run rsync with silenced vanished file warnings (non-critical).
#
# Based on the rsync-no-vanished support script included with rsync:
# https://bugzilla.samba.org/show_bug.cgi?id=10356
set -o pipefail
# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"
# Don't use the feature checker for expected parameters as it can cause issues with server paths
# Check for --ignore-missing-args parameter support and remove if unavailable.
# When stripped, ignore23=1 records that exit code 23 should be suppressed below.
if rsync -h | grep '\-\-ignore-missing-args' >/dev/null 2>&1; then
parameters=("$@")
else
for parameter in "$@"; do
[[ ! $parameter == "--ignore-missing-args" ]] && parameters+=("$parameter") || ignore23=1
done
fi
# This prepends `--trust-sender` to the parameters if supported by the current version of rsync
# to mitigate the degradation of performance due to the resolution of CVE-2022-29154
# shellcheck source=share/github-backup-utils/ghe-rsync-feature-checker
# shellcheck disable=SC2046
if [ "$($( dirname "${BASH_SOURCE[0]}" )/ghe-rsync-feature-checker --trust-sender)" == "true" ]; then
parameters=("--trust-sender" "${parameters[@]}")
fi
# This adds `--compress` to the parameters if supported by the current version of rsync
# shellcheck source=share/github-backup-utils/ghe-rsync-feature-checker
# shellcheck disable=SC2046
if [ "$($( dirname "${BASH_SOURCE[0]}" )/ghe-rsync-feature-checker --compress)" == "true" ] && [ "$GHE_RSYNC_COMPRESSION_ENABLED" = "yes" ]; then
parameters+=("--compress")
fi
# This loads the $GHE_EXTRA_RSYNC_OPTS from the config file if available then adds them
# to the parameters and skip adding if already present in the parameters
# shellcheck source=share/github-backup-utils/ghe-rsync-feature-checker
# shellcheck disable=SC2046
if [ -n "$GHE_EXTRA_RSYNC_OPTS" ]; then
for extra_opt in $GHE_EXTRA_RSYNC_OPTS; do
if [ "$($( dirname "${BASH_SOURCE[0]}" )/ghe-rsync-feature-checker "$extra_opt")" == "true" ]; then
parameters+=("$extra_opt")
fi
done
fi
# Lines matching this pattern (vanished-file noise) are filtered from rsync's output.
ignore_out='^(file has vanished: |rsync warning: some files vanished before they could be transferred)'
# NOTE(review): this regex only matches "version 3.x.y"; a future rsync 4.x
# would fall into the <3.x branch below — confirm when rsync 4 ships.
rsync_version_check=$(rsync --version | grep -E "version 3.[0-9]*.[0-9]*")
if [ -n "$rsync_version_check" ]; then
# rsync >= 3.x sends errors to stderr. so, we need to redirect to stdout before the pipe
rsync "${parameters[@]}" 2>&1 | (grep -E -v "$ignore_out" || true)
else
# rsync <3.x sends errors to stdout.
rsync "${parameters[@]}" | (grep -E -v "$ignore_out" || true)
fi
# With pipefail set and the grep guarded by `|| true`, $? here reflects
# rsync's exit status from the pipeline above.
res=$?
# Suppress exits with 24.
if [ $res = 24 ]; then
res=0
fi
# Suppress exits with 23 if --ignore-missing-args was unavailable.
if [ $res = 23 ] && [ -n "$ignore23" ]; then
res=0
fi
exit $res

Просмотреть файл

@ -1,47 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-rsync-feature-checker <rsync-command>
#/ returns true if the passed rsync command is supported by the current version of rsync
#/ returns false if the passed rsync command is not supported by the current version of rsync
#/
set -o pipefail
# The rsync option to probe, exactly as the caller passed it
# (may or may not include leading dashes).
rsync_command=$1
# The argument's leading dashes ("-", "--", ...), empty when none were given.
leading_dashes=$(echo "$rsync_command" | grep -oE "^-+")
# The option name with any leading dashes stripped.
normalized_command=$(echo "$rsync_command" | sed -E "s/^-+//")
# First mention of the option in `rsync -h`, dashes included; empty when the
# installed rsync does not document it.
found_command=$(rsync -h | grep -oE "\B-+($normalized_command)\b" | head -n "1")
# The found option with its dashes stripped, for dash-less comparisons.
normalized_found_command=$(echo "$found_command" | sed -E "s/^-+//")
# Pick what to compare against: a caller who passed "-x" or "--xyz" must match
# the documented option exactly, dashes included; a dash-less caller matches on
# the bare option name. (The original had two byte-identical branches for "-"
# and "--"; they are collapsed here. Arguments with three or more leading
# dashes still take the dash-less comparison and can never match, exactly as
# before.)
if [ "$leading_dashes" = "-" ] || [ "$leading_dashes" = "--" ]; then
expected_match="$found_command"
else
expected_match="$normalized_found_command"
fi
if [ "$rsync_command" == "$expected_match" ]; then
echo "true"
else
echo "false"
fi

Просмотреть файл

@ -1,106 +0,0 @@
#!/usr/bin/env bash
# get-rsync-size.sh Get the total size of dir-files to be transfered using rsync --link-dest
#
# Example:
# transfer_size repositories /dest_dir
#
# Sample output:
# Total transferred file size: 80 bytes

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$(dirname "${BASH_SOURCE[0]}")/ghe-backup-config"

# Location of last good backup for rsync --link-dest
backup_current="$GHE_DATA_DIR/current/"

# If we have a previous increment, avoid using those unchanged files using --link-dest support.
if [ -d "$backup_current" ]; then
  link_dest="--link-dest=${GHE_DATA_DIR}/current"
fi

# transfer_size <component> <dest-dir>
#
# Prints an integer estimate (in MB) of how much data rsync would
# transfer for the given component, obtained from a dry-run rsync
# (-n with --stats) against the appliance. Exits 1 for an unknown
# component name.
transfer_size()
{
  local host=$GHE_HOSTNAME
  local backup_data=$1

  # mssql keeps its backup artifacts under a dedicated "backups"
  # subdirectory; every other component syncs its data directory directly.
  if [[ "$1" == "mssql" ]]; then
    data_user_dir="/data/user/$1/backups"
  else
    data_user_dir="/data/user/$1"
  fi
  local dest_dir=$2

  # Define user for rsync-path: the remote rsync runs via sudo as the
  # system user that owns the component's files.
  case "$backup_data" in
    "repositories" | "pages")
      user="git"
      ;;
    "storage")
      user="alambic"
      ;;
    "elasticsearch")
      user="elasticsearch"
      ;;
    "mysql")
      user="mysql"
      ;;
    "mssql")
      user="mssql"
      ;;
    "actions")
      user="actions"
      ;;
    "minio")
      user="minio"
      ;;
    *)
      echo "Unknown user: $backup_data"
      exit 1
      ;;
  esac

  # Check if instance is cluster and fetch appropriate primary host for the different components.
  # NOTE(review): $CLUSTER is executed as a command here, so it is assumed
  # to hold the string "true" or "false" (presumably set by
  # ghe-backup-config) — confirm.
  if "$CLUSTER"; then
    # Presumably "<node-name> <address>" pairs for every cluster node, used
    # below to resolve the chosen node name to an address — TODO confirm
    # against ghe-cluster-nodes output format.
    cluster_nodes_output=$(ghe-ssh "$host" "ghe-cluster-nodes -i")
    case $1 in
      elasticsearch | storage | pages | actions | mssql)
        # First node carrying this component's role.
        cluster_host=$(ghe-ssh "$host" "ghe-cluster-nodes -r $backup_data" | head -1)
        ;;
      mysql)
        # MySQL is sized against the configured master node.
        cluster_host=$(ghe-ssh "$host" "ghe-config cluster.mysql-master")
        ;;
      repositories)
        # Repositories live on nodes with the "git" role.
        cluster_host=$(ghe-ssh "$host" "ghe-cluster-nodes -r git" | head -1)
        ;;
      *)
        exit 0
        ;;
    esac
    host=$(echo "$cluster_nodes_output" | grep "$cluster_host" | awk '{print $2}' | head -1)
  fi

  # Get file transfer size estimates. When a previous snapshot of this
  # component exists, pass --link-dest so unchanged files do not count
  # towards the estimate.
  if [ -d "${GHE_DATA_DIR}/current/$1" ]; then
    total_file_size=$(ghe-rsync -arn --stats \
      -e "ssh -q $GHE_EXTRA_SSH_OPTS -p 122 -l admin" \
      --rsync-path="sudo -u $user rsync" \
      "$link_dest"/"$1" \
      --ignore-missing-args \
      "$host:$data_user_dir/" \
      "$dest_dir/" | grep "Total transferred file size" | sed 's/.*size: //; s/,//g')
  else
    total_file_size=$(ghe-rsync -arn --stats \
      -e "ssh -q $GHE_EXTRA_SSH_OPTS -p 122 -l admin" \
      --rsync-path="sudo -u $user rsync" \
      --ignore-missing-args \
      "$host:$data_user_dir/" \
      "$dest_dir/" | grep "Total transferred file size" | sed 's/.*size: //; s/,//g')
  fi

  # Reduce mysql size as only the compressed file is transferred:
  # bytes are halved and converted to MB (rounding up). Other components
  # are converted bytes -> MB (rounding up). Zero/empty input prints 0.
  if [[ "$1" == "mysql" ]]; then
    echo "$total_file_size" | awk '{if ($1 > 0) printf "%.0f\n", int(($1+999999.5)/2000000); else printf "0\n"}'
  else
    echo "$total_file_size" | awk '{if ($1 > 0) printf "%.0f\n", int(($1+999999.5)/1000000); else printf "0\n"}'
  fi
}

Просмотреть файл

@ -1,86 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-ssh [<option>...] <host> [<simple-command>...]
#/ echo 'set -o pipefail; <complex-command>...' | ghe-ssh [<option>...] <host> /bin/bash
#/ Helper to ssh into a GitHub instance with the right user and port. The first
#/ form should be used for simple commands; the second form should be used for
#/ complex commands that include pipelines or multiple commands.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Start from any extra ssh options supplied via the environment.
opts="$GHE_EXTRA_SSH_OPTS"

# Walk the argument list, accumulating ssh options until the first
# non-option word, which is taken as the target host.
while true; do
  case "$1" in
    -p)
      # Explicit port; takes precedence over a port embedded in the host.
      port="$2"
      shift 2
      ;;
    -l|-o|-F)
      # Pass-through ssh options that carry a value.
      opts="$opts $1 $2"
      shift 2
      ;;
    -c|--clean)
      # Request cleanup of leftover ssh multiplexing sockets.
      cleanup_mux=1
      break
      ;;
    --)
      echo "Error: illegal '--' in ssh invocation"
      exit 1
      ;;
    *)
      host="$1"
      shift
      break
      ;;
  esac
done

# Cleanup mode: stop every lingering control master whose socket is still
# present in $TMPDIR ("-" stands in for the unused host argument), then exit.
if [ -n "$cleanup_mux" ]; then
  find "${TMPDIR}" -name ".ghe-sshmux-*" -type s -exec ssh -O stop -S {} - \; >/dev/null 2>&1 || true
  exit
fi

# Show usage with no host
[ -z "$host" ] && print_usage

# Shift off '--' if given immediately after host.
if [ "$1" = "--" ]; then
  shift
fi

# Split host:port into parts. The port is only used if not specified earlier.
port=${port:-$(ssh_port_part "$host")}
host=$(ssh_host_part "$host")

# Add user / -l option; default to the "admin" user when the host carries
# no "user@" prefix.
user="${host%@*}"
[ "$user" = "$host" ] && user="admin"
opts="-l $user $opts"

# Bail out with error if the simple command form is used with complex commands.
# Complex commands contain pipes or semicolons, or span multiple lines.
if echo "$*" | grep "[|;]" >/dev/null || [ "$(echo "$*" | wc -l)" -gt 1 ]; then
  echo "fatal: ghe-ssh: Attempt to invoke complex command with simple command form." 1>&2
  echo "See ghe-ssh --help for more on correcting." 1>&2
  exit 1
fi

# Unless disabled, multiplex connections through a shared control master
# keyed on user@host:port so repeated ghe-ssh calls reuse one TCP session.
if [ -z "$GHE_DISABLE_SSH_MUX" ]; then
  controlpath="$TMPDIR/.ghe-sshmux-$(echo -n "$user@$host:$port" | git hash-object --stdin | cut -c 1-8)"
  # shellcheck disable=SC2089 # We don't use bash arrays
  opts="-o ControlMaster=auto -o ControlPath=\"$controlpath\" -o ControlPersist=10m -o ServerAliveInterval=10 $opts"
  # Workaround for https://bugzilla.mindrot.org/show_bug.cgi?id=1988:
  # pre-establish the master in the background when its socket is missing.
  if ! [ -S "$controlpath" ]; then
    # shellcheck disable=SC2090 # We don't need the quote/backslashes respected
    ( cd "$TMPDIR" && ssh -f $opts -p $port -o BatchMode=yes "$host" -- /bin/true 1>/dev/null 2>&1 || true )
  fi
fi

# Turn on verbose SSH logging if needed
$GHE_VERBOSE_SSH && set -x

# Exec ssh command with modified host / port args and add nice to command.
# shellcheck disable=SC2090 # We don't need the quote/backslashes respected
exec ssh $opts -p $port -o BatchMode=yes "$host" -- $GHE_NICE $GHE_IONICE "$@"

Просмотреть файл

@ -1,86 +0,0 @@
#!/usr/bin/env bash
#/ Usage: ghe-ssh-config <ghe_host> [<host>...]
#/
#/ Returns a SSH configuration file which configures the connections either through proxy
#/ using <ghe_host> or connect directly by fetching the IP to list of <host> by <ghe_host>
#/
#/ Note: This script typically isn't called directly. It's invoked by the
#/ ghe-[backup|restore]-* commands.
set -e

# Bring in the backup configuration
# shellcheck source=share/github-backup-utils/ghe-backup-config
. "$( dirname "${BASH_SOURCE[0]}" )/ghe-backup-config"

# Show usage and bail with no arguments
[ -z "$*" ] && print_usage

GHE_HOSTNAME="$1"
shift
hosts="$*"

# Split the primary appliance address into host / port / user parts,
# defaulting to the "admin" user when no "user@" prefix is present.
ghe_host=$(ssh_host_part "$GHE_HOSTNAME")
ghe_port=$(ssh_port_part "$GHE_HOSTNAME")
ghe_user="${ghe_host%@*}"
[ "$ghe_user" = "$ghe_host" ] && ghe_user="admin"

opts="$GHE_EXTRA_SSH_OPTS"

# In case we are connecting to node without <role>-server-<uuid> format, revert back to proxy mode
[ -z "$GHE_DISABLE_SSH_MUX" ] && opts="-o ControlMaster=auto -o ControlPath=\"$TMPDIR/.ghe-sshmux-$(echo -n "$ghe_user@$ghe_host:$ghe_port" | git hash-object --stdin | cut -c 1-8)\" -o ControlPersist=10m -o ServerAliveInterval=10 $opts"

# Allow GIT_CONFIG to be specified manually for CI.
if [ -z "$GIT_CONFIG" ]; then
  # If an individual backup step is being run directly, or this is a restore
  # then ghe-backup-settings won't have ran, which transfers cluster.conf.
  # NOTE(review): the next line executes the value of
  # $GHE_RESTORE_SNAPSHOT_PATH as a command; the negation succeeds exactly
  # when the variable is set to a (non-executable) path. It reads as a
  # truthiness test — confirm this is the intended behaviour.
  if ! $GHE_RESTORE_SNAPSHOT_PATH && [ -f "$GHE_SNAPSHOT_DIR/cluster.conf" ]; then
    GIT_CONFIG="$GHE_SNAPSHOT_DIR/cluster.conf"
  else
    # Otherwise pull the live cluster.conf off the appliance into a temp file.
    cluster_config_file="$(mktemp -t ".ghe-cluster-conf-XXXXXX")"
    ghe-ssh "$GHE_HOSTNAME" -- "sudo cat $GHE_REMOTE_CLUSTER_CONF_FILE 2>/dev/null" > "$cluster_config_file"
    GIT_CONFIG="$cluster_config_file"
  fi
fi

# Make the `git config` calls below read only the cluster.conf selected above.
export GIT_CONFIG_NOSYSTEM=1 GIT_CONFIG

for host in $hosts; do
  # Determine if a <role>-server-<uuid> host has been specified, and if so
  # generate the relevant SSH configuration.
  if [[ "$host" =~ [A-Za-z]+-server-[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12} ]]; then
    # Walk every node listed in cluster.conf looking for the one whose UUID
    # matches the requested <role>-server-<uuid> name.
    for cluster_host in $(git config --get-regexp cluster.*.hostname | cut -d ' ' -f2); do
      uuid=$(git config cluster.$cluster_host.uuid)
      if [[ "$host" =~ [A-Za-z]+-server-$uuid ]]; then
        # Prefer the node's IPv6 address, falling back to IPv4.
        if [ -n "$(git config cluster.$cluster_host.ipv6)" ]; then
          ip=$(git config "cluster.$cluster_host.ipv6")
        elif [ -n "$(git config cluster.$cluster_host.ipv4)" ]; then
          ip=$(git config "cluster.$cluster_host.ipv4")
        fi
        # Emit the shared header stanza once, on the first match.
        if [ -z "$temp_ssh_config_file" ]; then
          temp_ssh_config_file="$(mktemp -t ".hostfile-XXXXXX")"
          echo "Host *
User $ghe_user
Port $ghe_port
BatchMode yes" >> "$temp_ssh_config_file"
        fi
        # Direct-connection stanza covering all role aliases of this node.
        echo "Host git-server-$uuid pages-server-$uuid storage-server-$uuid
HostName $ip
Port $ghe_port
StrictHostKeyChecking no" >> "$temp_ssh_config_file"
        # If proxy mode is set, tunnel through the primary appliance.
        if [ -n "$GHE_SSH_PROXY" ]; then
          echo "ProxyCommand ssh -q $opts -p $ghe_port $ghe_user@$ghe_host nc.openbsd %h %p" >> "$temp_ssh_config_file"
        fi
      fi
    done
  else
    # Plain hostname: always proxy through the primary appliance.
    cat <<EOF
Host $host
ProxyCommand ssh -q $opts -p $ghe_port $ghe_user@$ghe_host nc.openbsd %h %p
StrictHostKeyChecking=no
EOF
  fi
done

# Emit the accumulated direct-connection stanzas, if any were generated.
if [ -n "$temp_ssh_config_file" ]; then
  cat "$temp_ssh_config_file"
fi

Просмотреть файл

@ -1,531 +0,0 @@
00:1e:18:e9:74:fb:d6:4e:ef:88:38:59:16:a3:a7:a3
00:3d:07:23:20:f6:dc:fc:28:cb:1e:96:08:16:a6:8c
00:61:8b:a4:90:37:3f:8c:02:3c:5c:eb:3f:f7:c4:e0
01:6f:6a:63:3b:a3:54:d5:e8:50:70:c2:83:bf:50:4b
01:80:47:81:28:6f:65:d3:dd:ca:2d:99:84:50:08:7d
01:bb:ca:3e:51:67:32:a0:c6:ba:4c:06:23:f7:19:6d
02:d8:1b:c8:7b:74:4b:70:65:a2:14:5a:7b:70:32:75
03:3a:ff:42:01:fd:2a:08:d0:08:1a:78:25:79:19:28
03:56:f0:3d:13:f2:3a:3b:fb:b7:0a:0b:db:51:aa:cd
04:47:f2:79:68:a0:ef:e2:ff:3b:3d:9f:de:c9:64:7c
04:bb:1b:fc:c3:8c:36:7b:a1:13:7a:27:d7:23:02:b3
06:b8:78:f7:0e:d1:76:3f:43:9b:62:f9:75:b2:36:64
06:d1:97:f1:ee:0c:91:c5:8d:81:6c:05:37:eb:74:b7
07:17:ba:80:38:11:4b:60:d1:36:67:93:16:d2:ca:be
07:94:d6:29:5d:2b:7f:d4:e2:e6:ea:b4:80:50:e2:81
08:0b:1e:a1:2b:72:eb:3e:6d:ec:65:cf:74:12:ef:50
09:19:0b:ba:c6:05:09:33:97:78:bb:fa:f6:16:51:40
0a:35:77:9f:53:47:c1:d1:78:f4:fc:c9:bc:f4:0f:bb
0a:61:2a:e4:9c:d7:4a:70:05:ea:54:20:fc:7a:2f:78
0a:74:29:59:29:f8:2f:17:e1:7a:b7:3f:27:19:cb:28
0b:10:8c:94:df:b6:ed:a3:e7:09:52:05:70:c8:e0:28
0b:7d:ee:07:7a:55:69:fe:cd:e7:35:90:29:c0:b4:02
0b:ce:93:2a:9d:5a:42:05:44:9a:d0:92:a2:6d:52:31
0d:53:09:71:03:e1:55:c7:18:b2:38:ff:a4:42:bf:e4
0d:9d:6a:69:8e:37:2a:43:d4:ef:95:cb:0b:8b:4d:66
0d:c1:26:a4:f8:f6:e5:25:71:0b:8e:01:d7:b6:d9:84
0e:20:8a:77:bf:eb:2a:ad:d9:11:c5:3e:32:c9:d1:4c
0e:df:dd:07:c7:29:d5:fb:cc:0a:7a:b9:1f:de:d5:5b
0f:ee:8d:02:2d:e1:76:f3:eb:f5:af:cb:38:9a:1c:33
10:17:7e:66:b8:f7:5c:4a:4c:ae:c7:d0:c6:b3:cb:f0
11:44:18:61:ca:fb:3f:82:06:68:a5:91:0a:f0:45:83
11:51:12:89:0c:86:3b:b7:98:6f:52:d5:c9:83:8b:3e
11:99:17:05:a8:ee:c6:c9:04:c2:a2:c0:24:bf:c9:ce
11:b6:17:8e:a8:37:f9:d6:f5:b2:0a:38:d6:54:cd:1e
11:fb:cc:a5:a6:06:ed:f0:33:cc:02:57:6e:ac:23:e1
11:fe:55:ad:89:71:d9:fa:02:88:f1:a1:28:f3:48:cd
12:5f:ac:a5:94:ab:8a:c4:72:70:d5:a2:27:15:f5:fe
14:32:6e:03:76:a6:82:86:a1:5b:8f:9a:f1:0d:df:21
14:4a:ce:e5:12:fa:c1:0d:31:3c:b5:ba:df:3a:b5:08
14:74:44:4f:6a:ef:d1:9d:8f:60:aa:de:33:63:b2:0a
16:25:d0:a8:b4:21:18:99:29:e3:2b:78:04:69:47:d6
16:45:9a:7a:b5:f0:8b:06:15:8b:a9:05:c3:58:5a:f8
16:4d:b7:05:cb:e7:95:a5:0a:43:5a:de:9c:31:96:02
17:27:6c:c5:29:7f:09:ab:60:0a:f7:0b:b8:a9:87:f6
17:58:62:5b:ed:ba:db:fd:41:27:c8:2f:20:08:ef:ad
17:b7:52:02:58:56:04:2c:05:be:87:93:bf:c4:f9:6b
17:fe:11:03:48:5e:37:77:25:ae:9c:21:1e:d6:db:66
18:ae:cc:5b:8d:bb:25:bc:f0:9f:b1:b5:c4:9e:e8:0b
19:89:07:ce:da:54:11:a4:12:a6:7d:b5:d3:9b:5a:2c
1a:28:cc:9a:8b:2d:23:16:c7:88:db:9a:fe:a1:b2:a9
1a:71:c4:5d:20:d8:5b:e7:69:99:21:61:2c:eb:5c:5c
1a:fe:0f:6d:a0:2f:6b:4d:2c:7a:bc:a7:98:90:7a:89
1c:0c:bd:16:33:f4:4c:43:6f:1a:8d:e0:e0:cb:0b:56
1c:b4:a1:16:e9:14:f1:ce:d2:97:ec:09:8e:b9:1e:5e
1c:ec:88:95:92:3e:7e:8d:ec:b8:54:45:02:80:ad:92
1d:39:cc:b9:5a:b5:76:6e:00:d4:0a:87:ce:87:bd:2d
1d:ca:2a:ce:86:59:8f:59:b2:fb:5f:42:b5:3f:5c:09
1e:1a:b2:5d:b2:11:f8:0e:51:b6:95:e2:49:10:88:a3
1e:92:9f:93:d7:88:1b:ae:39:72:d6:92:cb:3a:f0:cb
1e:cb:30:2a:63:7e:3f:85:ad:04:6f:78:e2:cc:60:3a
1e:d8:cf:7d:3d:13:f9:3c:c1:e7:5a:14:f2:71:33:14
1e:ee:23:36:a1:3c:7d:0d:20:15:fb:f4:f6:89:90:67
1f:57:ba:04:05:9a:6a:e8:27:da:61:42:0f:b7:07:f0
1f:c5:9f:c5:d3:40:65:cd:61:2d:37:0f:db:c3:b9:16
1f:ed:81:ee:b1:96:82:b7:b6:f0:b2:83:04:f6:62:46
20:bf:67:d9:c1:a6:0b:9b:0e:2a:22:86:ed:29:ba:6a
21:23:9d:46:e5:46:90:77:9f:e8:e2:6c:c3:47:f2:2e
21:8a:c9:fc:52:61:e3:66:81:bc:ef:d9:a5:04:9a:96
21:9e:f5:99:75:b4:34:2e:ac:78:34:3d:92:0b:c0:e5
22:cc:ca:f9:26:ae:4a:30:b1:25:02:f7:77:cc:fd:75
22:e6:ca:93:be:52:68:a5:51:c3:b2:db:ce:50:64:68
23:74:57:f4:e9:a5:7b:c4:43:22:9b:9c:13:dd:48:94
24:a1:0c:d9:be:c5:0d:3a:dd:5a:1e:54:da:76:03:98
25:30:0e:16:bb:b8:d3:4e:c3:73:be:e9:42:86:55:c5
25:45:e5:a3:93:97:14:59:2a:b0:1a:1d:12:5c:4c:37
25:c7:bf:5a:44:7d:bf:f9:0d:69:13:ea:3d:69:dc:5d
26:b1:3f:04:90:e7:98:8c:3f:ca:2b:85:71:78:ca:23
26:d0:33:db:e9:92:e2:5a:3f:d2:5f:e8:2c:13:ee:6f
27:34:2d:e5:35:dd:2c:58:68:71:c6:34:a7:12:b1:0e
27:3e:e7:fc:47:62:19:29:d7:cb:34:bf:14:64:be:db
27:8e:30:11:10:ec:3a:30:40:98:49:7c:37:8e:e6:78
27:99:83:24:ed:e0:9b:49:68:d9:8d:c1:e2:29:1e:62
27:a9:de:f6:f4:c4:ff:de:68:42:bf:20:15:fc:76:2b
28:42:e5:ac:70:3a:43:ee:ae:85:44:8d:e5:c3:d7:cb
28:7e:1d:e7:10:e2:cb:4d:87:2d:9d:d2:55:8c:65:69
28:ad:d7:c4:d8:d0:73:c5:37:f4:b3:86:5f:17:03:d1
29:3c:b2:41:42:87:b5:1c:f4:f5:30:ba:20:c4:1a:68
29:7e:20:3e:2d:30:ba:ef:2d:87:cb:ac:c4:d1:40:c0
29:cb:bc:e8:bb:6a:9b:b6:ad:54:a8:10:f6:6d:87:1c
2c:71:09:84:fc:28:af:da:05:b3:24:92:79:a4:a3:65
2c:a9:cb:e4:5a:c7:01:46:e8:dc:be:39:5a:5f:23:c1
2e:ca:24:57:b9:1f:3b:c6:25:21:92:18:4b:cc:3d:5d
2f:9a:fe:1a:f6:20:38:d6:4d:a7:0b:01:65:c6:3d:7e
30:6e:89:da:84:18:84:dd:ac:4b:c4:4b:b4:1e:f7:53
31:32:d4:f7:da:bc:bb:43:8e:d8:9c:54:45:95:b3:ee
31:c6:5e:da:b9:7b:50:96:e3:7a:a8:0d:ec:b8:e0:9c
33:c0:ef:01:ec:a2:7f:5f:98:29:f9:14:d3:aa:67:d8
33:c9:1d:8b:e6:c0:41:16:27:07:11:0a:74:81:5d:b0
33:e8:14:f4:b3:17:3d:d2:ff:fb:29:c7:1c:a1:d3:97
34:07:4d:e8:6b:76:6c:d7:99:00:37:41:75:a6:61:56
34:a2:60:9a:a5:4b:5b:85:d4:48:e6:74:c5:d3:70:3c
34:e6:d8:e0:3b:e8:5c:53:6c:35:46:bd:5c:5a:6a:38
36:a7:fd:e9:60:60:86:0c:02:e7:55:ed:cb:53:fb:90
37:56:00:e4:ae:be:e0:86:a3:ca:be:95:15:0f:56:4c
38:09:f2:78:7b:eb:7d:68:40:a8:47:2c:d7:2d:14:05
38:0f:69:3b:14:dc:33:43:f8:b0:02:a8:ff:61:4e:3d
38:1e:d0:ee:e6:c0:0c:a7:83:84:90:89:b2:1e:b5:b0
38:f4:0b:0f:2e:09:59:4b:9a:d6:99:de:32:84:cf:83
39:06:09:72:0a:70:6d:25:da:dd:5d:43:eb:9a:7f:c5
39:12:ab:30:6c:32:0f:9e:ae:59:f7:f9:6c:7a:18:c7
39:74:02:dd:5a:56:ad:f0:d2:3f:d1:35:74:2a:c9:d3
39:96:ab:4b:e9:59:ce:87:de:54:a2:30:3a:19:e7:69
3a:22:e3:7b:a3:38:ae:6b:33:96:9d:a7:4d:71:06:5d
3a:e3:9e:30:e3:6c:68:cc:d0:57:d8:ae:cc:6c:13:01
3b:dc:aa:c7:83:3d:cc:4e:00:88:69:78:fd:c0:39:76
3c:2a:fd:86:17:c4:eb:87:71:35:1b:69:16:22:15:e3
3c:f6:7b:77:80:e8:8d:5a:1b:ba:7b:ca:de:9b:0c:9c
3d:38:9c:12:e9:51:45:2d:c4:2a:db:9a:35:8c:8c:bc
3e:4d:78:2c:8c:e9:aa:03:65:d7:b4:37:1a:87:0e:e7
3e:6d:23:16:2f:0e:8f:28:db:a7:f0:b3:b0:7e:78:6d
3e:7b:2c:f7:bc:88:3e:73:46:9e:96:f1:32:41:f4:22
3e:89:2b:39:49:f9:46:fc:cc:6e:a3:65:7f:25:4f:48
3e:d4:4d:e7:96:6b:aa:21:f1:5a:14:32:ac:4d:6a:de
3f:99:1d:13:0b:21:a4:cd:e2:cc:5f:fe:b8:ab:3a:85
40:6c:dc:5c:f7:3e:5c:28:23:2c:6f:f8:7f:aa:18:7c
41:5d:5f:e5:6b:7e:da:aa:3d:3e:b9:df:87:33:f0:28
42:95:59:c4:ab:4f:8d:9f:50:64:53:d7:c2:1e:4c:4d
42:9f:a8:bf:1c:33:0b:5b:f8:bf:5c:7b:3d:f6:34:81
42:f5:7b:0e:96:ab:2e:87:d6:a1:81:c8:26:f7:af:9a
43:9d:f8:78:c5:07:5b:c0:98:25:67:a0:5f:2d:30:83
44:30:fc:83:6e:cc:d4:f3:87:51:e9:be:f7:99:39:3c
45:a2:1a:00:71:d1:43:ac:87:cc:b0:a3:5a:a1:9a:56
45:b3:1c:75:fe:f3:53:0f:d4:ca:6a:4d:5d:ec:3d:dc
45:fe:55:13:c1:e7:c8:e9:eb:98:de:39:a6:f7:2c:ed
46:29:59:2e:c5:10:c1:d4:0b:dc:3c:e4:e2:4f:3c:37
46:50:92:6e:c9:a3:cf:8b:f7:ce:83:f8:4e:55:e3:70
46:9f:fe:2e:6f:dd:52:7b:7c:9b:a4:d0:51:a1:be:93
46:d6:93:0b:1a:a7:16:48:79:6f:07:8e:f8:bd:89:49
47:29:8e:88:11:66:0d:fe:a7:76:d1:a3:28:a2:7c:60
47:ac:25:c1:3d:b4:d0:95:33:10:88:6e:73:25:5d:90
47:f4:65:31:e8:a0:a8:35:3d:c1:90:a5:73:95:45:e0
48:7a:7b:9e:85:dd:34:0b:47:d8:97:13:2a:4c:c2:1d
49:1e:2c:cb:8f:c2:6a:8c:a2:ec:7f:df:bb:28:29:a4
49:26:48:31:44:ca:2f:90:71:93:2f:f7:68:93:ed:0f
49:b2:4f:de:c4:61:50:aa:60:94:36:d0:24:54:5d:d5
49:f1:58:ce:00:d2:6d:1d:79:68:f6:72:9c:c8:c4:1d
4a:89:1f:fb:b5:33:fb:7c:4c:4d:4c:24:2c:41:66:81
4a:8b:3a:74:b9:06:dd:22:93:1b:33:be:34:22:f2:5a
4a:da:49:be:36:60:08:9f:f9:7a:9d:ff:d0:f8:1d:41
4a:fb:75:6b:ec:68:2b:a8:ce:e0:aa:1d:96:1b:0b:be
4b:64:04:93:f2:2d:ff:00:5e:22:e9:d5:2b:93:4f:1a
4b:aa:34:a1:36:bc:f2:82:c6:7b:80:63:ba:ba:99:2c
4b:b3:af:d6:ee:32:93:45:20:51:81:f9:25:be:2a:13
4b:b7:0e:ed:7c:b8:ff:53:a5:bb:10:d2:fe:f6:47:9b
4c:25:4b:69:90:a4:d7:22:e0:95:8a:9c:50:65:27:6e
4c:57:fc:61:e8:ae:e4:82:86:af:44:72:83:54:94:97
4d:0b:7d:4a:0e:08:36:7d:49:ee:f3:de:c5:67:85:73
4d:a7:db:15:2c:0c:6c:66:02:46:6b:f4:c6:c1:88:e0
4d:ab:27:d0:9b:6e:20:f9:14:e2:43:56:a9:85:46:70
4d:b1:3f:2e:59:29:ae:6a:06:78:e0:10:ff:7c:14:a9
4d:ef:2b:0a:8d:ea:fe:48:3e:d1:48:6f:ad:a0:a8:73
4d:f2:16:ff:10:2e:8f:32:63:1d:28:22:64:31:69:0f
4e:e4:8b:3d:2b:ec:f8:32:42:ec:66:fd:be:fc:fc:d0
4f:02:61:52:d5:10:37:a8:d8:b6:f9:43:3d:42:e1:08
4f:63:97:2f:6a:86:27:d1:35:af:9a:dc:9a:fd:00:6b
4f:fe:5d:9a:70:9e:7e:4d:e0:51:f4:d4:6b:34:a8:3c
50:1c:00:b1:74:2a:d2:44:3d:90:8e:63:af:52:22:12
50:3f:cc:2f:01:78:fe:f7:86:4a:23:29:a8:15:46:b9
50:af:22:33:c0:5d:47:77:2f:60:bd:0d:53:d3:ee:50
51:14:57:97:ff:f4:81:24:31:ce:8e:5f:38:f6:47:02
51:63:7a:dc:d4:7d:c7:bb:bf:ff:60:2a:d6:13:68:c5
51:b8:8a:55:9d:02:12:ea:12:b7:10:22:50:76:59:5c
51:d8:0c:33:10:0d:84:68:90:cf:9e:72:7b:f9:b6:cb
52:6d:53:23:b4:20:93:d1:2e:91:c7:ba:d4:3c:a8:20
52:af:2c:8f:5a:c1:9a:23:99:3d:d4:3a:ba:7a:96:b4
53:48:55:2b:5a:7a:01:e7:3f:0d:40:a1:ca:40:aa:f8
53:55:80:8b:25:60:01:cb:4d:d2:70:93:83:36:94:e0
53:cc:c9:d6:06:84:73:49:83:0f:ee:86:69:32:9b:e2
54:45:77:8c:e8:10:e0:84:4d:33:e5:dd:e8:65:78:ec
54:4d:a0:0e:45:7d:ec:2c:7b:0c:97:2b:50:0b:a0:fc
54:ca:36:f2:83:a5:ad:cf:71:55:15:94:3f:02:59:c4
55:12:1b:8a:83:06:c5:62:5b:18:af:62:a1:82:48:6c
55:58:c8:fe:6c:d3:ae:2a:3a:87:77:c1:eb:de:24:8b
55:9a:fa:4f:3d:e9:46:ed:9f:e6:c4:0a:f5:6a:69:d4
56:62:86:e7:ef:35:71:6f:b7:c3:68:3d:b5:dd:12:33
56:6e:4d:18:c4:ce:66:96:7a:6c:99:0a:c1:70:aa:25
56:bd:22:f2:35:b9:c2:88:3d:c4:c7:eb:8e:c1:42:1e
56:bd:df:db:27:32:3e:24:21:8d:9e:5b:4f:e2:b3:c2
57:23:4d:9a:10:31:18:8d:1c:40:2e:66:f4:7e:4e:9b
57:bb:e0:7e:3d:58:64:e6:d4:81:9c:c3:f2:07:f9:52
58:18:a1:5a:c3:72:5f:9b:5e:6e:e3:2f:dd:16:2e:bd
59:1d:76:0f:19:01:72:46:ed:20:35:ad:e4:f7:a5:0d
5a:b5:d0:12:ea:94:74:c2:29:f2:5f:4e:f5:20:f1:15
5a:ea:ec:0c:37:23:08:81:5f:a1:d2:34:9f:89:fe:58
5b:87:8d:d6:94:97:03:85:0b:07:c3:d4:3b:87:3d:38
5b:e4:88:a2:2c:49:0f:9f:de:41:0c:d5:fc:cc:78:b8
5d:06:17:3b:74:5b:e5:a5:23:3c:6d:ae:ed:09:03:7c
5e:99:49:26:a2:f8:6a:d6:a4:4e:28:89:4c:6a:45:9f
5e:bc:71:89:33:2a:e6:a2:dc:5c:18:f0:fd:e9:c6:a4
5f:4f:03:35:22:4d:02:f9:d1:a1:58:13:f3:ac:b1:54
5f:95:66:b2:7a:40:23:0b:89:1f:5b:01:65:34:ed:b8
5f:98:09:52:f9:3c:4e:c0:0f:b7:12:32:96:29:3f:28
60:13:12:7a:f6:39:7e:5d:5c:a0:4c:c3:ab:3f:36:f7
60:63:24:1f:b9:e8:c5:af:2c:12:26:f7:ee:5d:fb:42
61:5d:72:64:3c:c7:16:39:4d:fa:89:10:3c:84:d5:ee
61:5e:07:b2:6b:97:f3:4b:8c:95:9d:83:28:d1:9e:f1
61:bc:40:b4:d6:ae:2e:19:4a:ad:a0:64:ed:21:42:2d
62:1e:ea:60:18:82:cb:19:45:79:36:f2:ad:6a:ee:f7
63:3b:4c:ef:34:cc:ea:d9:c9:1e:be:c4:c2:98:d0:bc
63:8a:b0:91:ed:75:ba:1a:23:87:b9:e1:ac:6b:e9:68
63:98:e7:95:6c:19:7a:51:24:1c:2a:86:4b:5d:44:97
63:9e:59:15:b1:7b:07:36:5b:8f:f1:f7:1c:f3:d9:f2
64:1e:d5:58:90:a8:6a:1e:38:67:8b:4c:b6:f4:d8:07
64:6f:0b:70:16:33:b7:dd:bf:90:ee:2e:c9:d7:32:1e
65:5c:ea:98:d4:35:6e:ad:54:0d:3f:53:9f:52:ed:9f
65:7d:67:5b:26:5c:ea:50:57:bd:12:7b:c9:e3:52:4c
66:41:ff:81:df:8d:cf:0d:56:6b:e6:93:9d:7e:53:b5
66:ed:dd:78:69:72:2d:5e:b7:bf:0e:a1:e8:eb:88:88
67:14:86:86:93:d4:56:95:78:78:f4:35:85:06:ee:6f
67:2e:73:e9:da:fc:cb:37:fa:d3:4f:7f:83:5e:9f:43
67:c4:07:16:2a:13:e1:66:47:1f:a0:c2:b2:ef:1a:98
67:f9:56:2a:b9:85:9e:c5:f1:6b:42:b1:af:8e:4d:84
68:12:5d:3f:b7:68:17:4f:bc:84:4e:94:32:99:ac:3e
68:81:4c:4d:81:79:60:54:9d:e7:e5:72:7a:09:f5:40
68:93:6c:be:4b:9a:43:88:9a:f8:b7:3f:43:40:d9:6f
68:ce:5b:63:6c:c6:35:bb:86:1f:5f:d9:81:3f:d7:66
69:2b:44:d3:0f:9c:5a:4d:e7:73:8b:a1:3c:ab:a0:28
69:fb:0f:96:90:64:a0:4f:58:49:fd:a5:3f:ed:d9:d4
6a:3c:ab:23:6b:56:df:60:9c:b4:2c:9b:a1:8c:76:46
6a:6b:46:d6:8c:fc:e4:4f:8e:81:89:32:f7:12:0d:5e
6a:ea:05:78:ef:74:76:ed:da:fe:83:31:df:65:e5:62
6a:fb:07:3b:25:9e:b4:27:aa:08:9f:1b:17:29:43:39
6b:1f:00:92:14:ff:4e:58:d9:79:0d:8d:8d:b3:d6:bc
6c:60:4d:f8:61:34:39:6b:bb:e8:fd:66:5e:be:49:a3
6c:89:43:0c:71:ef:3e:36:45:e5:fa:5d:35:90:5f:51
6c:cc:b6:01:82:86:90:8a:90:22:24:54:ca:64:2e:4d
6c:d8:e5:7f:cd:e3:e9:85:4c:0c:b0:7e:00:0a:55:33
6d:66:f9:26:ca:f2:be:a6:6b:c8:bd:f7:b3:ff:79:73
6d:b1:3a:6a:95:87:c4:b4:3e:31:0b:39:48:08:98:14
6e:0e:2c:91:4b:50:fe:af:74:3f:2e:71:57:6f:23:3b
6e:a4:96:d7:0c:e9:f9:e4:67:79:35:89:c4:38:60:05
6e:b3:ed:66:77:22:c9:8f:2b:37:b6:53:eb:aa:2f:fd
6e:e5:c0:bb:0d:79:71:5c:a5:b4:2e:b4:51:44:7d:55
6f:06:5d:25:f9:4c:74:75:de:cc:e8:5a:52:6b:3c:9b
70:63:53:83:e8:2d:1f:71:a4:75:9f:16:25:c9:19:71
70:72:c8:5a:a8:46:fd:4a:0b:75:85:8c:4e:ce:6f:74
71:47:18:eb:ca:f4:f2:11:5c:e7:aa:b1:84:d9:52:dd
72:14:4e:18:36:88:7e:5a:5a:30:3f:fd:1a:6e:a7:69
72:65:94:63:11:82:d0:d2:4f:f5:fb:62:1a:de:ef:b7
73:4b:7d:f4:c3:ed:68:da:f9:ce:9f:10:da:5e:53:e9
73:e0:4e:e2:18:ae:47:59:ce:e5:fc:fd:98:6f:31:ed
73:e6:2e:bd:b2:09:75:f9:58:bf:fd:26:73:8c:48:c1
75:96:b7:87:19:2a:94:51:dd:1b:21:17:af:1f:f3:6c
75:97:82:2e:4b:63:6b:df:86:5b:22:f8:ea:28:4f:1e
75:b2:5d:ea:bb:2e:02:ca:46:89:e6:68:84:dc:fb:0e
75:e4:11:39:ca:2c:6a:6b:a6:b8:89:9f:6c:ef:24:77
76:58:e6:3e:ee:f1:08:df:9a:c8:63:a7:81:2a:a5:fe
77:84:62:ce:a8:54:48:6f:81:63:9d:89:2d:d9:d2:90
78:28:2f:fb:8e:e0:42:52:64:0a:03:06:1a:4c:3d:9c
78:60:64:1c:ea:c2:3e:89:b9:f8:01:bc:f4:a9:39:0a
78:80:cd:7e:99:30:1d:a6:ca:f1:e3:f3:cb:44:2f:d6
79:1f:ff:95:c1:d5:94:fa:8b:02:8b:c4:be:3b:7c:4c
7b:aa:98:f2:5b:df:86:2e:6f:f8:d9:c9:f0:cb:52:29
7c:74:dc:30:ac:65:91:d1:2c:b0:e2:88:71:8a:24:b6
7c:8f:5b:98:f7:f8:ce:77:15:7d:13:8b:0e:a7:de:72
7d:13:07:89:f9:e3:73:2c:f6:59:c9:df:35:90:5f:63
7d:60:3d:a6:3a:c4:17:9d:95:d7:e8:39:ec:6c:ef:ca
7d:9b:5f:5e:a9:01:a4:39:ee:cb:34:a3:28:c3:72:3a
7e:09:14:f1:7b:81:a9:aa:de:69:bb:07:3b:38:e7:ca
7e:3e:36:31:8e:e2:4f:7e:9d:43:f8:59:31:7f:99:10
7e:72:72:fa:07:09:f3:24:14:89:34:7a:83:d4:18:b4
7e:fa:90:60:a3:55:cd:fa:e9:a1:27:d8:00:71:1b:b9
7f:53:2a:4c:6b:26:34:99:4f:3a:7a:6d:c8:82:62:2a
7f:99:2c:ba:25:33:21:af:28:00:11:c4:52:07:c7:46
82:2b:5f:cd:a5:8f:82:3d:04:f9:95:85:a4:8c:25:90
82:2e:bc:f6:19:6e:2d:9a:64:03:d5:8c:5b:d7:ee:dd
82:30:b6:73:69:da:fd:2d:1f:e0:b1:d2:43:f0:34:ee
82:65:1e:0b:5d:1b:65:36:b2:ce:69:7a:c3:3a:f7:b1
82:67:45:b5:15:e8:f5:c2:8f:f4:fe:03:aa:f1:3a:b0
83:4b:60:2e:df:3a:4a:57:8c:ba:be:04:72:b3:a3:f0
83:7b:11:8c:3f:bb:e9:a4:45:04:a4:06:c5:e0:73:92
83:e9:4a:a9:77:8d:4f:44:f9:ff:34:e1:a3:4d:9e:44
83:f5:b2:4d:31:b0:49:79:bd:ed:44:c0:58:57:82:a8
85:57:57:9a:27:3f:00:f4:d1:bb:6f:ab:ef:d4:f2:a0
85:af:1e:99:bf:43:07:b9:c1:af:cc:c8:6e:da:1f:4c
87:f6:f1:76:a0:f0:cb:19:94:b7:ad:1f:f8:9b:72:cc
88:2d:c3:e4:b6:54:10:eb:7d:0b:9e:e5:30:3a:0c:cb
88:30:17:27:46:38:31:88:ae:2d:20:79:ba:c7:3b:19
88:51:e5:72:ba:24:d9:79:12:55:99:f0:9f:b0:81:c2
89:7b:9e:8b:18:d4:87:f8:e3:0a:15:fd:c5:46:86:66
89:c7:56:d2:b8:98:86:a4:be:f2:e1:ca:14:ae:bf:82
89:fe:6f:a0:ec:98:85:69:16:1a:3a:40:d2:96:75:03
8b:ea:07:e7:63:29:e7:86:8c:2a:9c:1d:1b:c6:f0:4a
8c:2f:0f:53:17:49:54:9e:91:2b:be:ff:2b:5a:2c:24
8c:a9:e4:54:05:5f:eb:7e:51:01:86:d0:38:6f:44:ad
8c:e7:4e:7d:12:72:51:00:2d:62:52:7a:01:26:7e:c0
8d:15:06:83:38:b3:62:32:a0:24:d2:3e:38:83:0f:c1
8d:5f:8d:d0:65:66:32:37:7e:88:e8:c0:b1:46:70:be
8d:f8:e9:d2:c6:04:69:44:70:cd:2a:b5:a5:08:b2:ec
8e:0f:75:96:b5:fa:c6:48:88:64:cb:da:a1:6e:99:e1
8e:26:ab:7a:ba:28:b1:f1:14:f8:24:53:6c:bd:c1:16
8e:2b:3b:25:a1:0d:29:00:a1:be:3f:92:ce:cc:bf:a3
8e:35:0b:f7:b3:b6:cb:70:f2:c3:91:a9:53:a3:7a:7a
8e:3a:95:69:74:92:6f:22:51:a1:13:38:71:a3:92:ba
8f:8e:bf:ca:e8:96:c0:f5:d6:49:65:8f:21:94:b6:4d
8f:db:2a:23:13:fb:b0:8d:5b:d2:97:88:bc:8b:65:c4
90:99:41:bc:8a:b6:fa:10:67:28:cd:4b:32:71:86:ba
92:7b:a0:41:28:49:4d:c4:de:c4:39:e2:b9:a9:80:c0
93:53:5d:0c:3f:c4:61:3d:12:1c:94:78:69:6d:c8:b3
93:ce:82:a8:ca:1a:52:f4:a2:bd:3d:87:44:30:bd:b8
94:b3:79:3e:6f:79:8d:4c:56:43:93:57:06:d5:e6:72
94:b6:86:e2:99:6a:b0:78:2d:4c:04:eb:c4:7b:df:a8
94:fc:07:72:4e:85:fb:eb:f5:fd:26:63:41:39:12:84
95:3a:f1:45:11:c9:1a:f0:6b:c3:aa:8d:e2:8c:af:6c
95:c2:c5:10:e8:32:01:ba:15:1e:77:24:06:2c:5a:76
95:c5:02:66:c0:1a:a0:0e:f5:d9:47:bf:73:f7:ba:5f
95:f8:83:50:50:57:30:05:4c:95:41:a9:3f:35:be:1a
96:ee:a9:c9:98:f3:97:a5:8d:39:36:28:33:b7:25:6b
97:ee:a2:8e:aa:37:50:fc:b2:57:a3:df:99:57:be:b7
98:a9:7d:15:e8:3e:aa:7b:74:b9:0f:ac:84:a3:ca:fc
99:2e:1d:83:ac:1c:51:04:1d:be:9c:7c:1b:ff:67:b7
99:b4:32:76:56:83:2b:80:32:1d:87:a6:b7:9d:1e:83
9a:15:c9:31:07:f5:73:fb:ec:06:25:14:34:dd:b4:57
9a:1d:26:28:1e:89:2c:a8:27:3c:9d:33:ce:96:ac:55
9a:6e:b7:09:ab:d4:c2:77:96:36:e8:dc:32:eb:6a:ca
9a:74:47:53:6d:4f:c5:c7:37:55:15:9a:2f:45:f2:82
9a:a0:f3:7d:fb:27:b7:18:ec:36:46:9b:c1:d3:ef:ab
9b:8d:34:6a:60:93:98:fc:29:58:71:f3:7b:ed:de:c0
9b:c2:a1:df:3a:2e:f3:7d:2e:a3:31:6f:d6:9c:d9:41
9c:06:81:a0:b0:4d:57:fd:b1:88:fa:bc:fc:24:4a:41
9d:1f:16:4a:09:39:e0:7f:9a:2f:ee:ce:a1:74:31:b3
9d:a5:0d:73:a8:26:88:3b:d2:ba:fd:ba:e2:a1:25:9a
9d:d7:43:41:66:e9:dd:23:15:80:91:6f:52:e3:78:ca
9e:36:98:9b:77:61:17:a8:59:8a:66:36:75:c5:fb:22
a0:1b:f0:49:00:e6:dd:e2:8e:17:44:bd:2f:af:04:b6
a0:29:d8:55:29:66:23:1c:a0:40:59:ff:28:10:a9:13
a0:37:3f:4c:cc:bf:cc:97:79:a7:25:a5:fe:c5:e0:a7
a0:7e:dc:3f:58:c1:cd:c5:19:fb:de:b9:08:a5:3f:44
a0:91:da:8a:f9:4f:0e:77:c3:58:21:30:a3:47:20:56
a0:bc:c9:19:2a:a6:a9:b0:02:a5:0b:3e:ee:74:1e:13
a0:c4:e4:bb:1c:5f:92:00:98:6c:52:fc:42:43:e9:b4
a1:40:55:a0:62:00:16:3d:95:32:2e:cf:56:02:b0:7e
a1:ac:54:8f:43:4b:a3:36:93:1d:b1:a2:bc:14:37:53
a2:7d:01:e4:b1:fa:f4:74:59:e3:72:25:52:9e:94:98
a3:1b:23:8d:ce:a0:b0:46:c7:f4:9e:19:c3:55:b8:ad
a3:47:cd:e5:01:6d:4e:75:47:1d:8a:15:c8:47:6f:60
a3:50:67:5c:a0:24:b1:3b:17:0f:2a:23:d4:fe:f4:03
a4:4b:7c:19:b6:a5:45:2a:fe:1d:6b:a7:46:e8:93:56
a4:be:df:16:dc:07:93:53:58:8c:05:0a:6c:7e:fd:a7
a5:64:7a:6e:d7:0d:28:86:98:29:50:1b:7d:e3:fc:15
a5:d4:65:30:dd:6b:c0:bb:b7:2e:b6:bf:1e:e2:79:9c
a6:9f:f8:2d:e2:53:a8:88:30:b9:45:c0:15:2d:3e:fa
a7:e0:e6:87:03:fe:09:a0:c4:00:aa:e2:ca:32:56:1b
a8:28:28:4d:36:04:f1:ff:ec:aa:a3:ae:08:0c:ed:75
aa:c5:d3:08:34:21:df:6e:1f:05:7f:c3:7d:ff:37:67
aa:ca:ab:94:45:b4:c4:3e:f3:0f:af:a2:b6:3c:7e:1b
aa:e7:af:b6:bf:6c:a6:bd:e8:22:6c:9f:78:9e:aa:24
aa:f3:39:39:58:2b:61:ad:e8:d3:7a:f2:d2:e9:dd:7a
ab:06:a7:41:12:7a:05:43:c1:41:ec:c5:bb:93:35:42
ab:75:cb:f0:35:4e:79:6e:6e:1c:1c:d4:43:9e:17:96
ac:fc:e7:c9:89:49:cc:a2:a1:d1:34:bc:e7:ec:b3:41
ad:16:c9:69:58:ea:04:4a:de:d5:a4:89:1f:fc:67:0c
ad:7e:66:ea:0f:3a:d9:1d:9b:73:4d:66:a1:78:94:23
ad:9d:8d:d9:4f:75:d3:25:10:b2:5a:01:82:8b:94:8a
ad:ff:86:0f:b8:ec:2f:e3:a1:fe:ea:0a:50:9f:0d:2c
ae:75:ea:cf:c1:35:35:91:fb:1c:98:61:70:04:bb:8d
af:18:f2:d3:ce:4c:9b:ab:08:17:b4:21:0e:25:a9:c2
af:57:2c:11:a2:13:3d:b2:07:a5:35:18:74:3f:69:7f
af:75:63:0b:6f:9e:73:f2:fd:a7:96:3f:cb:94:b0:a2
af:91:d8:39:8a:3d:18:83:2b:8e:86:29:56:2f:b0:c1
b0:73:6d:c2:a7:68:cd:fd:90:d4:b3:3f:a3:81:94:7b
b0:81:a5:6d:b1:7c:f4:d9:4b:36:66:0a:e4:fd:93:d8
b0:90:da:33:e9:f4:7b:89:49:91:86:a0:34:95:4e:12
b1:6f:df:5c:86:f9:c8:1b:5d:d6:3a:a6:01:81:f0:e4
b1:86:b0:d1:ed:34:d4:bf:af:ca:bf:d3:2f:59:bc:92
b2:69:82:2f:25:48:bb:fc:62:c7:9a:de:41:42:13:55
b3:36:05:ea:e3:37:b8:32:c7:0a:f4:e1:54:de:85:f0
b3:67:0b:de:16:bd:e8:2f:00:82:f7:7f:5e:bb:e7:c5
b3:b2:90:80:10:61:de:9b:3d:7e:01:51:a1:00:f3:7d
b4:ac:a8:4b:7a:c0:bc:5f:0f:b0:c5:33:aa:3e:b5:d2
b5:21:b3:41:09:72:79:b6:83:c7:37:19:3b:4f:44:d7
b5:55:f0:96:fd:d7:e9:ab:87:b3:47:95:d1:d6:34:8c
b6:15:96:b7:44:b6:e8:f7:5b:de:e6:ba:71:87:5b:3b
b6:e4:c1:93:02:07:d1:7b:3f:22:1b:27:c3:f8:db:8b
b6:e5:f7:47:5e:38:01:bb:bd:8f:ef:43:a0:66:c7:fc
ba:a8:de:48:a7:c1:0b:45:35:88:ea:2b:23:c5:7c:9a
ba:c4:bb:99:88:27:d4:f6:f7:7b:58:e4:77:78:fd:1a
bb:cb:1f:49:1f:af:b7:97:98:f0:40:bc:51:5f:79:a4
bc:52:e6:ca:85:c7:9b:fb:88:21:90:8a:34:60:92:52
bc:cb:98:b4:35:21:ee:2b:95:dc:47:8c:b2:18:46:ff
bd:ab:bb:36:f7:ee:27:cf:2a:f4:bf:40:0b:97:31:5d
bd:b2:8d:9f:93:43:d2:2d:ba:6a:23:30:cc:6d:0e:d6
bd:c5:05:37:f2:fd:a1:43:67:15:de:b1:7a:60:e8:48
bd:f3:49:83:6d:96:55:0f:e7:96:90:35:99:ee:95:5a
be:1b:13:e2:ba:c4:ae:af:7c:80:00:14:a8:f4:ce:ca
be:8b:05:76:54:72:b8:74:f2:83:41:36:43:b4:c6:d2
be:f9:71:f4:ef:d7:8e:30:e5:a8:10:88:c8:c5:22:18
bf:5b:39:de:d8:50:ac:22:42:4c:25:c9:dd:06:71:5a
bf:9a:57:3c:a1:73:fe:ba:49:5c:ca:01:44:10:53:54
bf:b9:11:3d:24:51:b7:88:6d:93:07:07:1d:58:85:fe
bf:f0:05:39:00:1a:16:3a:5f:e3:09:e3:2c:49:7f:f6
c0:cb:fd:07:33:e9:62:14:6b:fb:d5:26:54:f3:c5:0d
c1:39:f4:ae:dd:c3:61:d5:d9:d0:ad:97:5e:20:42:e8
c1:9a:75:7e:09:b3:1a:a3:95:4f:7f:85:67:44:2b:48
c1:ae:ec:68:a2:86:03:1b:5d:69:04:c5:85:ca:93:39
c1:b2:88:31:97:20:de:2d:e5:32:ee:2a:49:e3:55:2a
c2:38:c4:01:68:27:81:20:a0:b6:5b:5b:bd:ec:43:ea
c2:91:17:3f:8d:10:fc:3c:d0:10:da:e2:3a:13:59:ed
c2:ae:3e:a7:a2:ab:a2:54:49:30:05:b8:6a:b9:dd:43
c2:fe:4f:17:d1:f3:57:fe:2f:5e:1f:49:03:2f:44:51
c3:20:31:a4:58:2c:6f:2a:5a:a8:37:90:52:91:9c:f9
c3:6f:55:73:5f:ea:df:0a:f3:10:ce:dc:cb:5e:51:ca
c3:ac:b3:ad:0d:83:aa:21:47:cd:10:ae:b2:dd:89:f7
c4:21:8d:35:de:1d:7d:61:b0:b6:7d:2c:dd:3c:53:52
c4:3a:25:19:87:62:6f:b0:f8:39:3c:35:6f:37:6d:ba
c4:d1:a4:cd:0f:64:ef:ea:e6:9a:a4:7c:c3:3d:8a:1f
c4:d6:a7:16:ff:60:70:8e:4a:56:b5:14:f3:2d:c5:53
c4:fe:cb:8c:35:63:0c:df:a9:8e:a2:6e:47:d1:be:22
c5:07:24:6e:9c:05:33:42:b0:ab:9e:4e:7b:21:da:48
c5:60:9c:5c:0b:ed:c1:7a:b4:7e:9e:13:a1:0c:2d:c1
c5:a4:31:76:ec:41:c3:0a:c5:82:66:96:71:fe:cd:b5
c6:06:2d:09:88:a0:28:23:45:49:98:5f:eb:64:94:4f
c6:29:b8:06:1e:98:3b:ac:48:4e:5c:80:4e:ac:b0:9e
c6:30:27:20:6e:28:01:86:9f:d5:cd:0a:11:39:0c:14
c7:37:f4:02:c4:05:e0:76:a1:b8:43:54:9e:8e:6c:85
c7:c7:29:68:c4:0a:2b:2f:05:1f:d1:27:75:44:3d:f1
c8:23:ea:d9:6e:a3:aa:88:de:81:d9:78:9b:2c:c7:fc
c8:2a:69:78:1d:43:46:7c:8a:e6:03:5e:a4:a2:5d:8d
c9:2d:0d:c3:b7:5f:31:73:1c:ee:08:44:12:49:5b:fc
c9:4e:95:21:03:3c:db:32:57:dd:0a:56:bd:b8:dc:c4
c9:67:4f:17:4d:74:58:1b:b0:62:f3:88:b2:8a:f6:95
ca:bf:3e:e2:7e:04:89:2f:22:b4:df:70:ea:09:2a:67
cc:73:bb:df:1d:8b:ff:aa:32:37:d9:87:e7:3f:c1:c1
cc:76:5f:3b:00:39:d3:89:9a:f4:e1:eb:01:3f:7c:d3
cd:a8:0b:95:c8:a3:3b:df:15:ff:8f:5f:d1:91:27:9e
cd:fa:c2:73:da:2c:e6:1f:39:cb:1f:83:d9:96:2a:f0
ce:a5:ac:7e:2e:a5:e6:53:9f:1c:f3:66:ae:bf:de:65
ce:a7:d9:f1:b2:e9:d3:44:da:0e:b4:c6:3f:31:64:44
cf:1d:19:7c:2f:35:b5:76:c6:76:dc:b0:fc:aa:dd:57
cf:dc:13:cf:5e:60:74:38:f5:0c:64:2d:ba:35:e3:ed
d0:04:d3:8e:f0:f1:18:58:61:02:94:17:04:69:82:54
d0:ef:0c:62:38:d9:4c:48:a7:ea:a6:54:90:82:dc:ab
d2:69:3f:c5:77:8d:6d:9d:06:47:4b:8f:7c:5f:98:6d
d4:3a:a8:99:e2:fb:e7:c5:5b:89:d9:f2:30:66:d6:6a
d5:bd:86:b0:93:96:42:9d:ef:f5:6e:5a:b2:5a:1a:82
d6:20:9e:0e:55:d9:ea:3d:4a:41:ef:c8:08:d2:ba:20
d6:92:21:4b:04:3b:22:f5:ee:85:0a:63:bf:b3:fe:9b
d6:ac:83:bd:60:ca:5d:c7:de:28:af:e7:23:6a:32:aa
d8:5c:5e:a2:46:cb:ca:75:cf:25:2c:a2:30:a6:3a:28
d8:bc:92:66:15:6e:ba:10:27:9b:20:e8:00:16:8d:01
d8:f7:67:ba:88:42:a4:9f:82:89:08:7f:4f:2f:d1:7a
d9:60:88:5c:c5:75:1b:ca:35:37:49:e0:e4:4a:16:40
d9:99:f4:ef:47:03:06:50:59:b0:fb:7b:e1:9a:6a:48
d9:a3:ab:3d:70:3b:17:05:7b:1f:31:c8:1b:00:22:59
d9:b8:23:c6:ad:1c:23:bc:af:e8:46:f9:aa:06:bb:c1
d9:ea:33:14:7a:cb:9f:85:0e:80:2d:b0:26:23:5b:26
da:1c:39:e0:e8:2d:92:eb:48:89:35:5c:2e:bc:fb:e3
da:47:7e:9e:9d:24:d9:c2:43:f1:07:e9:62:8b:79:f5
da:72:2b:e5:5e:ad:14:60:58:ea:65:15:fa:ca:3c:a7
db:72:a1:11:3d:bd:b4:c3:b1:5c:54:a6:02:a1:30:46
dc:32:3a:ed:50:e9:d2:eb:8b:96:60:5d:3d:65:68:1f
dc:40:6b:2b:2c:06:b3:2c:61:fb:61:25:ed:2e:4c:c0
dc:88:4b:c9:1f:74:b9:6f:c0:6c:00:69:46:5d:80:ee
dc:da:ab:82:6e:35:9f:df:44:32:2c:30:17:97:e5:72
df:cb:ba:fe:21:32:41:0a:b4:a6:c9:bd:90:b6:61:6b
e0:26:f2:cd:ac:af:8b:08:ce:11:94:84:7c:ae:f9:32
e0:7a:45:6f:19:ac:69:86:e4:7d:64:92:15:9e:ce:2d
e0:a7:80:40:8f:be:e6:ae:13:7c:f3:bb:08:0f:22:7a
e2:1f:e5:f5:f0:f9:dc:cd:83:44:c7:b7:4e:ab:06:32
e2:47:7f:8e:1c:14:f9:dc:b8:81:1f:f5:58:f3:b6:d5
e2:99:f4:ff:85:95:5a:5d:50:2e:42:d1:d2:65:40:38
e3:a2:f0:47:f1:a7:bd:3b:07:0c:fb:82:e1:9f:6e:43
e3:fb:64:9f:68:d6:3f:fa:1a:17:cd:51:3e:56:70:04
e5:2e:ca:45:83:73:d6:39:64:1a:0e:26:56:68:47:22
e6:d9:f4:b0:08:68:54:34:58:78:5d:b6:f1:cf:58:27
e7:5b:88:f9:81:62:7b:5a:77:f1:69:7d:70:99:62:4f
e7:d5:94:31:00:97:9e:78:be:e0:07:b2:55:47:f0:08
e7:dd:29:a1:dc:72:d6:89:33:c5:d7:e8:c6:97:18:67
e9:09:87:1b:45:9d:f4:08:f3:de:f0:9c:b8:4d:e6:1f
ea:69:7a:b9:d2:ae:ac:32:73:2c:28:39:da:f7:6c:e0
ea:f1:08:a0:6e:29:87:9d:61:66:97:3e:63:90:2e:d1
ea:f5:52:98:b3:1f:c2:c8:4c:50:b7:ad:b5:f6:48:de
eb:05:63:fb:41:e1:f6:b7:85:97:e8:3f:62:a8:49:14
eb:ad:4e:1d:84:9a:28:26:8f:1d:39:f3:c3:a0:4d:15
eb:e7:fe:a2:56:82:df:51:a6:78:9f:15:56:60:84:40
eb:f6:6f:e1:02:3b:ce:58:33:e0:c1:25:d6:0c:1f:f3
ec:50:30:97:7e:f2:32:b1:c8:2f:34:4c:fe:08:27:9d
ed:20:b7:8d:42:21:50:6c:bc:f4:29:ca:a6:3a:cf:46
ed:92:f8:b9:66:72:bd:13:35:56:81:ea:dd:b6:27:06
ed:c2:94:f6:22:bb:73:75:28:e8:15:c4:25:88:c3:da
ef:36:9d:65:68:c9:b3:3c:97:8d:8e:fe:e3:85:96:e0
ef:4e:1a:5c:7e:e5:64:34:5c:0b:be:74:87:38:5b:00
ef:67:01:08:80:a6:2f:78:78:1f:28:5c:a9:ec:ca:04
ef:72:c1:69:4d:46:57:e1:11:b5:30:89:f6:19:71:7c
f0:7d:6f:d9:43:c6:82:fd:43:38:31:f7:90:fc:6d:50
f0:95:7d:70:82:5c:61:a0:b6:cb:52:1a:1e:9e:48:2c
f0:d7:3a:8b:2c:96:40:6e:7f:c8:18:15:50:8f:77:9c
f2:04:f8:b4:51:ea:ba:78:78:7e:de:31:23:ab:0c:d8
f2:4c:b3:0f:c8:37:3d:41:52:0f:ba:b5:da:1a:d0:da
f2:b9:20:c8:1a:da:d3:a3:51:49:13:56:a4:4a:8c:83
f3:5c:f6:c2:4d:d2:64:e0:66:77:e6:25:7a:20:62:d1
f3:64:59:8d:a4:b8:ca:da:a8:46:1d:c4:d6:f0:46:6f
f3:cc:e8:24:50:56:d5:07:5f:b0:32:00:57:fc:fa:c0
f3:e6:80:aa:25:f3:e6:1a:85:26:4e:a0:a4:fb:c3:78
f4:81:fe:4d:42:f3:69:c3:0a:1f:05:82:20:e2:3e:dd
f5:05:c6:cd:cd:fb:49:b1:f7:c9:73:36:d2:5f:43:89
f5:f6:25:03:16:7c:46:a4:40:91:ea:9f:27:75:63:b7
f6:3c:78:64:ba:7c:3a:9a:75:86:86:6c:fe:2e:e3:17
f6:97:2d:6a:7a:6a:25:ab:d5:ab:3d:2b:b6:23:67:53
f6:9d:76:e1:1b:31:0a:3b:9a:06:95:bc:13:6a:e6:13
f8:a2:e7:eb:55:a2:59:da:d1:e9:ee:26:4d:d2:7e:c5
f9:98:93:1b:f9:d7:6b:12:ef:ac:64:0d:e3:f4:00:26
fa:00:ad:3e:4a:69:72:da:bc:f0:36:21:73:70:57:01
fa:69:86:20:03:78:c9:64:d8:18:18:25:b5:b5:23:ea
fa:84:e1:cc:df:f5:e0:0f:2b:5e:87:f6:c6:fb:ad:31
fa:95:94:fa:66:5e:45:52:f7:6d:69:df:47:71:69:3b
fa:e7:29:c6:36:53:99:f7:cb:23:7d:92:9e:45:09:5a
fa:ef:2f:ae:01:91:4f:c3:bd:76:36:ac:f0:24:67:77
fb:67:7c:e2:a3:8b:30:1c:19:2e:83:12:7a:99:63:12
fb:a5:24:fd:a4:b0:19:d4:2f:a2:9c:21:40:ef:dd:ad
fc:02:65:ae:b4:8a:d6:64:05:7c:2d:53:11:7c:a0:e0
fc:09:1e:1d:6c:9d:82:b0:fd:89:a9:78:45:e9:e0:bd
fc:cb:23:aa:46:7d:93:7f:e2:97:ce:b5:fe:aa:da:15
fc:eb:1b:fb:50:fe:da:2e:47:e6:fd:16:5e:b6:ed:35
fd:82:6b:62:dc:be:29:f0:eb:86:ea:d3:83:96:aa:30
fd:b3:81:5a:44:67:8b:cc:a0:af:3c:36:22:79:c5:7e
fe:07:a4:38:40:77:0f:8e:64:bd:21:a0:89:1e:cb:e9
fe:23:28:25:44:81:de:d5:e2:f5:d8:3e:99:1b:7d:3e
fe:40:71:ce:55:65:e6:f8:77:74:dd:28:5d:0a:d4:05
fe:44:16:c2:34:62:80:df:80:74:86:29:2e:77:db:13
ff:2f:b9:d0:a0:01:9d:c7:a2:6c:83:81:c2:2e:4c:ae

Просмотреть файл

@ -1,3 +0,0 @@
# Minimum tool versions — presumably consumed by backup-utils' dependency
# checks (e.g. a ghe-backup preflight); TODO confirm against the caller.
min_rsync=2.6.4
min_openssh=5.6
min_jq=1.5

Просмотреть файл

@ -1,18 +0,0 @@
#!/usr/bin/env bash
#/ track-progress: track progress of backup or restore tasks
#
# Defines progress(), which bumps a shared step counter under
# /tmp/backup-utils-progress and rewrites a human-readable status line.

# progress MESSAGE
#
# Record one completed step. Reads the current step count, the total step
# count, and the run type from the files created by the init_progress
# function, increments the counter, and writes
# "<type> progress: <pct> % (<done> / <total> ) MESSAGE" to the info file.
#
# If the progress files are not present (e.g. an individual script is being
# invoked directly rather than through the main backup/restore driver),
# this is a no-op so progress tracking never interferes with the task.
progress(){
  if [ -f "/tmp/backup-utils-progress/progress" ] &&
    [ -f "/tmp/backup-utils-progress/total" ] &&
    [ -f "/tmp/backup-utils-progress/type" ]; then
    PROGRESS=$(cat /tmp/backup-utils-progress/progress)
    PROGRESS_TOTAL=$(cat /tmp/backup-utils-progress/total)
    PROGRESS_TYPE=$(cat /tmp/backup-utils-progress/type)
    # Guard against an empty or zero total: bc would hit a divide-by-zero
    # runtime error and leave PROGRESS_PERCENT blank in the info line.
    # (A non-numeric total also falls through to 0; stderr is suppressed.)
    if [ -n "$PROGRESS_TOTAL" ] && [ "$PROGRESS_TOTAL" -ne 0 ] 2>/dev/null; then
      PROGRESS_PERCENT=$(echo "scale = 2; ($PROGRESS / $PROGRESS_TOTAL) * 100" | bc)
    else
      PROGRESS_PERCENT=0
    fi
    echo $((PROGRESS + 1)) > /tmp/backup-utils-progress/progress
    echo "${PROGRESS_TYPE} progress: $PROGRESS_PERCENT % ($PROGRESS / $PROGRESS_TOTAL ) $1 " > /tmp/backup-utils-progress/info
  fi
}

Просмотреть файл

@ -1 +0,0 @@
3.11.0

Просмотреть файл

@ -1,36 +0,0 @@
## Writing tests
See also the [Bash style guide](https://github.com/github/backup-utils/tree/master/STYLEGUIDE.md)
---
##### All tests should use `set -e` and call `setup` before making any assertions
Like this:
```bash
begin_test "echo works"
(
set -e
setup
echo passing | grep passing
)
end_test
```
---
##### Resist the urge to disable `set -e`
If you want to assert failure, please resist the urge to disable `set -e` and
instead use negation with `!`:
```bash
begin_test "netcat is not from bsd"
(
set -e
setup
! nc -h 2>&1 | grep bsd
)
end_test
```

Просмотреть файл

@ -1,11 +0,0 @@
# Test backup config

# Fake backup host listening on a non-standard port.
GHE_HOSTNAME="localhost:122"

# Substitute a stub for ionice on platforms that lack it (OS X).
if ! command -v ionice >/dev/null 2>&1; then
  GHE_IONICE="ionice-stub"
fi

# vim: ft=sh

Просмотреть файл

@ -1 +0,0 @@
ghe-fake-true

Просмотреть файл

@ -1 +0,0 @@
ghe-fake-true

Просмотреть файл

@ -1,22 +0,0 @@
#!/usr/bin/env bash
# Usage: curl ...
# Fake curl command stub for tests.
set -e

# Return an empty list of indexes for ghe-backup-es-audit-log
if echo "$@" | grep -q '_cat/indices/'; then
  exit 0
# Exit with a non-zero code to mimic a nonexistent index
elif echo "$@" | grep -q 'localhost:9201/audit_log'; then
  exit 1
fi

# Write args to stdout
echo "$@"

# Read from stdin and write to stdout when "@-" is given (curl's -d @-).
# Check every argument: the original loop compared "$1" on each iteration,
# so stdin was relayed only when "@-" happened to be the first argument —
# and then once per argument rather than once.
for arg in "$@"; do
  if [ "$arg" = "@-" ]; then
    cat
    break
  fi
done

Просмотреть файл

@ -1,31 +0,0 @@
#!/usr/bin/env bash
# Usage: dgit-cluster-backup-routes
# Emulates the remote GitHub dgit-cluster-backup-routes command. Tests use this
# to assert that all repos and gists get backed up.
set -e

# version TAG
# Turn a dotted version string (optionally prefixed with "v") into a
# zero-padded integer for numeric comparison, e.g. 2.16.23 -> 2016023000.
version() {
  echo "${@#v}" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }';
}

if [ -z "$GHE_REMOTE_VERSION" ]; then
  echo GHE_REMOTE_VERSION must be set for this script to work.
  exit 1
fi

# The list of gists returned by the source changed in 2.16.23, 2.17.14,
# 2.18.8, and 2.19.3. We need to account for this difference here.
# The series regexes are anchored and their dots escaped: the original
# unanchored "2.16." pattern treated "." as any character, so it could also
# match unrelated version strings. Expansions are quoted for safety.
if [[ "$GHE_REMOTE_VERSION" =~ ^2\.16\. && "$(version "$GHE_REMOTE_VERSION")" -ge "$(version 2.16.23)" ]] || \
   [[ "$GHE_REMOTE_VERSION" =~ ^2\.17\. && "$(version "$GHE_REMOTE_VERSION")" -ge "$(version 2.17.14)" ]] || \
   [[ "$GHE_REMOTE_VERSION" =~ ^2\.18\. && "$(version "$GHE_REMOTE_VERSION")" -ge "$(version 2.18.8)" ]] || \
   [[ "$(version "$GHE_REMOTE_VERSION")" -ge "$(version 2.19.3)" ]]; then
  echo 0/nw/01/aa/3f/1234 git-server-fake-uuid
  echo 1/nw/23/bb/4c/2345 git-server-fake-uuid1
  echo 0/01/aa/3f/gist/93069ad4c391b6203f183e147d52a97a.git git-server-fake-uuid2
  echo 1/23/bb/4c/gist/1234.git git-server-fake-uuid
else
  echo 0/nw/01/aa/3f/1234 git-server-fake-uuid
  echo 1/nw/23/bb/4c/2345 git-server-fake-uuid1
  echo 0/01/aa/3f/gist git-server-fake-uuid2
  echo 1/23/bb/4c/gist git-server-fake-uuid
fi

Просмотреть файл

@ -1 +0,0 @@
ghe-fake-finalize-command

Просмотреть файл

@ -1,9 +0,0 @@
#!/usr/bin/env bash
# Usage: dgit-cluster-restore-routes
# Emulates the remote GitHub dgit-cluster-restore-routes commands. Tests use this
# to assert that the command was executed.
set -e
cat <<EOF
0/nw/01/aa/3f/1234 git-server-fake-uuid git-server-fake-uuid1
1/nw/23/bb/4c/2345 git-server-fake-uuid1 git-server-fake-uuid2
EOF

Просмотреть файл

@ -1 +0,0 @@
ghe-fake-finalize-command

Просмотреть файл

@ -1,9 +0,0 @@
#!/usr/bin/env bash
# Usage: dpages-cluster-restore-routes
# Emulates the remote GitHub dpages-cluster-restore-routes command. Tests use this
# to assert that the command was executed.
set -e
cat <<EOF
4/c8/1e/72/2 git-server-fake-uuid git-server-fake-uuid2
4/c1/6a/53/31 git-server-fake-uuid1
EOF

Просмотреть файл

@ -1,6 +0,0 @@
#!/usr/bin/env bash
# Usage: enterprise-configure
# Emulates the remote GitHub enterprise-configure command. Tests use this
# to assert that the command was executed.
set -e
echo "enterprise-configure OK"

Просмотреть файл

@ -1 +0,0 @@
ghe-fake-true

Просмотреть файл

@ -1 +0,0 @@
ghe-fake-import-command

Просмотреть файл

@ -1 +0,0 @@
ghe-fake-import-command

Просмотреть файл

@ -1,6 +0,0 @@
#!/usr/bin/env bash
# Usage: ghe-cluster-config-apply
# Emulates the remote GitHub ghe-cluster-config-apply command. Tests use this
# to assert that the command was executed.
set -e
echo "ghe-cluster-config-apply OK"

Просмотреть файл

@ -1,6 +0,0 @@
#!/usr/bin/env bash
# Usage: ghe-cluster-config-update
# Emulates the remote GitHub ghe-cluster-config-update command. Tests use this
# to assert that the command was executed.
set -e
echo "ghe-cluster-config-update OK"

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше