diff --git a/docker/mysql-xtrabackup-final/xbackup-wrapper.sh b/docker/mysql-xtrabackup-final/xbackup-wrapper.sh
index 4084f0c4..8e29af56 100644
--- a/docker/mysql-xtrabackup-final/xbackup-wrapper.sh
+++ b/docker/mysql-xtrabackup-final/xbackup-wrapper.sh
@@ -5,8 +5,8 @@ exec > /tmp/xtrabackup-launch.log 2>&1
 cd /root
 
 # if you recover into a clean system, initial xbackup.sh init was run back then and should not repeat
-if [ -f restore-process-complete ]; then
-  if [ ! -f xtrabackup.database.txt ]; then
+if [[ -f restore-process-complete ]]; then
+  if [[ ! -f xtrabackup.database.txt ]]; then
     echo openemr > xtrabackup.database.txt
     chmod 600 xtrabackup.database.txt
   fi
@@ -14,12 +14,12 @@ if [ -f restore-process-complete ]; then
   rm restore-process-complete
 fi
 
-if [ ! -f allsetup.ok ]; then
+if [[ ! -f allsetup.ok ]]; then
   ./xbackup.sh -u openemr -a && ./xbackup.sh -t full && touch allsetup.ok && exit 0
   exit 1
 fi
 
-if [ -f force-full-backup ]; then
+if [[ -f force-full-backup ]]; then
   rm force-full-backup
   ./xbackup.sh -t full
   exit $?
@@ -27,7 +27,7 @@ fi
 
 # I don't like forcing it like this, but if the backup fails one day, we need to try it the next
 # here's the problem: manual run during an automated run will cause destruction and havoc and woe
-if [ $(date +%u) = 7 ]; then
+if [[ $(date +%u) = 7 ]]; then
   ./xbackup.sh -t full -f
 else
   ./xbackup.sh -t incr -f
diff --git a/docker/mysql-xtrabackup/docker-entrypoint.sh b/docker/mysql-xtrabackup/docker-entrypoint.sh
index d6249ead..e9f7e9a3 100644
--- a/docker/mysql-xtrabackup/docker-entrypoint.sh
+++ b/docker/mysql-xtrabackup/docker-entrypoint.sh
@@ -3,7 +3,7 @@ set -eo pipefail
 shopt -s nullglob
 
 # if command starts with an option, prepend mysqld
-if [ "${1:0:1}" = '-' ]; then
+if [[ "${1:0:1}" = '-' ]]; then
     set -- mysqld "$@"
 fi
 
@@ -26,14 +26,14 @@ file_env() {
     local var="$1"
     local fileVar="${var}_FILE"
     local def="${2:-}"
-    if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
+    if [[ "${!var:-}" ]] && [[ "${!fileVar:-}" ]]; then
         echo >&2 "error: both ${var} and ${fileVar} are set (but are exclusive)"
         exit 1
     fi
     local val="${def}"
-    if [ "${!var:-}" ]; then
+    if [[ "${!var:-}" ]]; then
         val="${!var}"
-    elif [ "${!fileVar:-}" ]; then
+    elif [[ "${!fileVar:-}" ]]; then
         val="$(< "${!fileVar}")"
     fi
     export "${var}"="${val}"
@@ -65,7 +65,7 @@ if [ "$1" = 'mysqld' -a -z "${wantHelp}" -a "$(id -u)" = '0' ]; then
     mkdir -p "${DATADIR}"
     chown -R mysql:mysql "${DATADIR}"
 
-    if [ -f /root/pending-restore ]; then
+    if [[ -f /root/pending-restore ]]; then
         /root/xrecovery-final.sh
     fi
 
@@ -78,7 +78,7 @@ if [ "$1" = 'mysqld' -a -z "${wantHelp}" ]; then
     # Get config
     DATADIR="$(_get_config 'datadir' "$@")"
 
-    if [ ! -d "${DATADIR}/mysql" ]; then
+    if [[ ! -d "${DATADIR}/mysql" ]]; then
         file_env 'MYSQL_ROOT_PASSWORD'
         if [ -z "${MYSQL_ROOT_PASSWORD}" -a -z "${MYSQL_ALLOW_EMPTY_PASSWORD}" -a -z "${MYSQL_RANDOM_ROOT_PASSWORD}" ]; then
             echo >&2 'error: database is uninitialized and password option is not specified '
@@ -92,7 +92,7 @@ if [ "$1" = 'mysqld' -a -z "${wantHelp}" ]; then
         "$@" --initialize-insecure
         echo 'Database initialized'
 
-        if command -v mysql_ssl_rsa_setup > /dev/null && [ ! -e "${DATADIR}/server-key.pem" ]; then
+        if command -v mysql_ssl_rsa_setup > /dev/null && [[ ! -e "${DATADIR}/server-key.pem" ]]; then
            # https://github.com/mysql/mysql-server/blob/23032807537d8dd8ee4ec1c4d40f0633cd4e12f9/packaging/deb-in/extra/mysql-systemd-start#L81-L84
            echo 'Initializing certificates'
            mysql_ssl_rsa_setup --datadir="${DATADIR}"
@@ -112,17 +112,17 @@ if [ "$1" = 'mysqld' -a -z "${wantHelp}" ]; then
         echo 'MySQL init process in progress...'
         sleep 1
     done
-    if [ "${i}" = 0 ]; then
+    if [[ "${i}" = 0 ]]; then
         echo >&2 'MySQL init process failed.'
         exit 1
     fi
 
-    if [ -z "${MYSQL_INITDB_SKIP_TZINFO}" ]; then
+    if [[ -z "${MYSQL_INITDB_SKIP_TZINFO}" ]]; then
        # sed is for https://bugs.mysql.com/bug.php?id=20545
        mysql_tzinfo_to_sql /usr/share/zoneinfo | sed 's/Local time zone must be set--see zic manual page/FCTY/' | "${mysql[@]}" mysql
     fi
 
-    if [ ! -z "${MYSQL_RANDOM_ROOT_PASSWORD}" ]; then
+    if [[ ! -z "${MYSQL_RANDOM_ROOT_PASSWORD}" ]]; then
        export MYSQL_ROOT_PASSWORD="$(pwgen -1 32)"
        echo "GENERATED ROOT PASSWORD: ${MYSQL_ROOT_PASSWORD}"
     fi
@@ -151,12 +151,12 @@ if [ "$1" = 'mysqld' -a -z "${wantHelp}" ]; then
     printf '%s\n' "${sql[@]}" | "${mysql[@]}"
 
-    if [ ! -z "${MYSQL_ROOT_PASSWORD}" ]; then
+    if [[ ! -z "${MYSQL_ROOT_PASSWORD}" ]]; then
        mysql+=( -p"${MYSQL_ROOT_PASSWORD}" )
     fi
 
     file_env 'MYSQL_DATABASE'
-    if [ "${MYSQL_DATABASE}" ]; then
+    if [[ "${MYSQL_DATABASE}" ]]; then
        echo "CREATE DATABASE IF NOT EXISTS \`${MYSQL_DATABASE}\` ;" | "${mysql[@]}"
        mysql+=( "${MYSQL_DATABASE}" )
     fi
 
@@ -166,7 +166,7 @@ if [ "$1" = 'mysqld' -a -z "${wantHelp}" ]; then
     if [ "${MYSQL_USER}" -a "${MYSQL_PASSWORD}" ]; then
        echo "CREATE USER '${MYSQL_USER}'@'%' IDENTIFIED BY '${MYSQL_PASSWORD}' ;" | "${mysql[@]}"
 
-       if [ "${MYSQL_DATABASE}" ]; then
+       if [[ "${MYSQL_DATABASE}" ]]; then
           echo "GRANT ALL ON \`${MYSQL_DATABASE}\`.* TO '${MYSQL_USER}'@'%' ;" | "${mysql[@]}"
        fi
 
@@ -184,7 +184,7 @@ if [ "$1" = 'mysqld' -a -z "${wantHelp}" ]; then
        echo
     done
 
-    if [ ! -z "${MYSQL_ONETIME_PASSWORD}" ]; then
+    if [[ ! -z "${MYSQL_ONETIME_PASSWORD}" ]]; then
        "${mysql[@]}" <<< "ALTER USER 'root'@'%' PASSWORD EXPIRE;"
     fi
     if ! kill -s TERM "${pid}" || ! wait "${pid}"; then
@@ -198,7 +198,7 @@ if [ "$1" = 'mysqld' -a -z "${wantHelp}" ]; then
     fi
 fi
 
-if [ -f /root/pending-restore ]; then
+if [[ -f /root/pending-restore ]]; then
     /root/xrecovery-final.sh
 fi
 
diff --git a/docker/mysql-xtrabackup/xbackup-wrapper.sh b/docker/mysql-xtrabackup/xbackup-wrapper.sh
index 4084f0c4..8e29af56 100644
--- a/docker/mysql-xtrabackup/xbackup-wrapper.sh
+++ b/docker/mysql-xtrabackup/xbackup-wrapper.sh
@@ -5,8 +5,8 @@ exec > /tmp/xtrabackup-launch.log 2>&1
 cd /root
 
 # if you recover into a clean system, initial xbackup.sh init was run back then and should not repeat
-if [ -f restore-process-complete ]; then
-  if [ ! -f xtrabackup.database.txt ]; then
+if [[ -f restore-process-complete ]]; then
+  if [[ ! -f xtrabackup.database.txt ]]; then
     echo openemr > xtrabackup.database.txt
     chmod 600 xtrabackup.database.txt
   fi
@@ -14,12 +14,12 @@ if [ -f restore-process-complete ]; then
   rm restore-process-complete
 fi
 
-if [ ! -f allsetup.ok ]; then
+if [[ ! -f allsetup.ok ]]; then
   ./xbackup.sh -u openemr -a && ./xbackup.sh -t full && touch allsetup.ok && exit 0
   exit 1
 fi
 
-if [ -f force-full-backup ]; then
+if [[ -f force-full-backup ]]; then
   rm force-full-backup
   ./xbackup.sh -t full
   exit $?
@@ -27,7 +27,7 @@ fi
 
 # I don't like forcing it like this, but if the backup fails one day, we need to try it the next
 # here's the problem: manual run during an automated run will cause destruction and havoc and woe
-if [ $(date +%u) = 7 ]; then
+if [[ $(date +%u) = 7 ]]; then
   ./xbackup.sh -t full -f
 else
   ./xbackup.sh -t incr -f
diff --git a/docker/mysql-xtrabackup/xbackup.sh b/docker/mysql-xtrabackup/xbackup.sh
index 9b67eac7..2ac5c11d 100644
--- a/docker/mysql-xtrabackup/xbackup.sh
+++ b/docker/mysql-xtrabackup/xbackup.sh
@@ -119,7 +119,7 @@ while getopts "t:s:i:b:d:l:u:m:fa" OPTION; do
 done
 
 # We need at least one arg, the backup type
-[ "x${CREATETABLE}" != "x1" ] && [ $# -lt 1 -o -z "${BKP_TYPE}" ] && { usage; exit 1; }
+[[ "x${CREATETABLE}" != "x1" ]] && [ $# -lt 1 -o -z "${BKP_TYPE}" ] && { usage; exit 1; }
 
 # log-bin filename format, used when copying binary logs
 BNLGFMT=mysql-bin
@@ -155,13 +155,13 @@ DEFAULTS_FILE=/root/innobackupex.cnf
 # holds name of DB for xtradb_backups table
 DATABASE_FILE=/root/xtrabackup.database.txt
 
-if [ "x${RESETDATABASE}" = "x1" ]; then
+if [[ "x${RESETDATABASE}" = "x1" ]]; then
   # write the file containing the name of the DB xtrabackup should store data in
   echo ${NEWDATABASENAME} > ${DATABASE_FILE}
   chmod 600 ${DATABASE_FILE}
 fi
 
-if [ ! -f ${DATABASE_FILE} ]; then
+if [[ ! -f ${DATABASE_FILE} ]]; then
   echo no defined xtradb reporting database: cannot proceed
   exit 1
 else
@@ -236,7 +236,7 @@ _sql_query() {
   for r in {1..${_try}}; do
     ${MY} -BNe "${1}" > /tmp/xbackup.sql.out 2>&1
     _ret=${PIPESTATUS[0]}
-    if [ "x${_ret}" != "x0" ]; then
+    if [[ "x${_ret}" != "x0" ]]; then
       sleep ${_sleep}
     else
       cat ${_out}
@@ -245,7 +245,7 @@
     fi
   done
 
-  if [ "x${_ret}" != "x0" ]; then
+  if [[ "x${_ret}" != "x0" ]]; then
    _s_inf "FATAL: Failed to execute SQL after attempting ${_try} times every ${_sleep} seconds, giving up!" | tee -a ${_out}
    _s_inf "SQL: ${1}" | tee -a ${_out}
    cat ${_out}
@@ -390,7 +390,7 @@ EOF
 }
 
 _error_handler() {
-  [ -f /tmp/xbackup.sql.out ] && cat /tmp/xbackup.sql.out \
+  [[ -f /tmp/xbackup.sql.out ]] && cat /tmp/xbackup.sql.out \
     && rm -rf /tmp/xbackup.sql.out
   _d_inf "FATAL: Backup failed, please investigate!"
 }
@@ -399,30 +399,30 @@ trap '_error_handler' TERM
 XBACKUP_PID=$$
 
 # do we need to do first-time table creation?
-if [ "x${CREATETABLE}" = "x1" ]; then
+if [[ "x${CREATETABLE}" = "x1" ]]; then
   echo attempting schema creation...
   _sql_query "${TBL}"
   echo schema creation success!
   exit 0
 fi
 
-if [ -f /tmp/xbackup.lock ]; then
+if [[ -f /tmp/xbackup.lock ]]; then
   _d_inf "ERROR: Another backup is still running or a previous \
backup failed, please investigate!";
 fi
 touch /tmp/xbackup.lock
 
-if [ ! -n "${BKP_TYPE}" ]; then _d_inf "ERROR: No backup type specified!"; fi
+if [[ ! -n "${BKP_TYPE}" ]]; then _d_inf "ERROR: No backup type specified!"; fi
 _s_inf "INFO: Backup type: ${BKP_TYPE}"
 
-[ -d ${STOR_DIR} ] || \
+[[ -d ${STOR_DIR} ]] || \
   _d_inf "ERROR: STOR_DIR ${STOR_DIR} does not exist, \
I will not create this automatically!"
-[ -d ${WORK_DIR} ] || \
+[[ -d ${WORK_DIR} ]] || \
   _d_inf "ERROR: WORK_DIR ${WORK_DIR} does not exist, \
I will not create this automatically!"
 
-if [ "x${COPY_BINLOGS}" = "x1" ]; then
+if [[ "x${COPY_BINLOGS}" = "x1" ]]; then
   mkdir -p "${STOR_DIR}/bnlg" || \
     _d_inf "ERROR: ${STOR_DIR}/bnlg does no exist and cannot be created \
automatically!"
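
An observation on the hunk at line 119 above: it converts only the first test and leaves `[ $# -lt 1 -o -z "${BKP_TYPE}" ]` on POSIX `[`, because the obsolescent `-a`/`-o` connectives are not supported inside `[[ ]]`, which uses `&&`/`||` instead. If a later pass wanted to finish the conversion, a hedged sketch of the equivalent (not part of this patch):

    # current form kept by the patch: -o inside a single [ ... ]
    [ $# -lt 1 -o -z "${BKP_TYPE}" ] && { usage; exit 1; }
    # [[ ]] equivalent: || replaces -o; the "x${VAR}" prefix idiom used
    # elsewhere in this file is also unnecessary under [[
    [[ $# -lt 1 || -z ${BKP_TYPE} ]] && { usage; exit 1; }

The same reasoning applies to the compound `[ "$1" = 'mysqld' -a -z "${wantHelp}" ]` guards left unconverted in docker-entrypoint.sh.
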
@@ -440,33 +440,33 @@ _start_backup_date=$(date)
 _s_inf "INFO: Backup job started: ${_start_backup_date}"
 DEFAULTS_FILE_FLAG=
-[ -n "${DEFAULTS_FILE}" ] && DEFAULTS_FILE_FLAG="--defaults-file=${DEFAULTS_FILE}"
+[[ -n "${DEFAULTS_FILE}" ]] && DEFAULTS_FILE_FLAG="--defaults-file=${DEFAULTS_FILE}"
 
 # Check for innobackupex
 if ! _ibx=$(command -v innobackupex); then
   _d_inf 'ERROR: Could not find innobackupex binary!'
 fi
-if [ -n ${DEFAULTS_FILE} ]; then _ibx="${_ibx} ${DEFAULTS_FILE_FLAG}"; fi
-if [ "x${GALERA_INFO}" = "x1" ]; then _ibx="${_ibx} --galera-info"; fi
+if [[ -n ${DEFAULTS_FILE} ]]; then _ibx="${_ibx} ${DEFAULTS_FILE_FLAG}"; fi
+if [[ "x${GALERA_INFO}" = "x1" ]]; then _ibx="${_ibx} --galera-info"; fi
 
 _ibx_bkp="${_ibx} --no-timestamp"
 _this_bkp="${WORK_DIR}/bkps/${CURDATE}"
-[ "x${STOR_CMP}" = "x1" ] && _this_bkp_stored="${STOR_DIR}/bkps/${CURDATE}.tar.gz" || \
+[[ "x${STOR_CMP}" = "x1" ]] && _this_bkp_stored="${STOR_DIR}/bkps/${CURDATE}.tar.gz" || \
   _this_bkp_stored="${STOR_DIR}/bkps/${CURDATE}"
 
 set -- $(_sql_last_backup)
 _last_bkp=$1
 _week_no=$2
 
-if [ -n "${STOR_DIR}" ]; then _this_stor=${STOR_DIR}
-elif [ ${KEEP_LCL} -eq 1 ]; then _this_stor=${WORK_DIR}
+if [[ -n "${STOR_DIR}" ]]; then _this_stor=${STOR_DIR}
+elif [[ ${KEEP_LCL} -eq 1 ]]; then _this_stor=${WORK_DIR}
 else _this_stor=''
 fi
 
 #
 # Determine what will be our --incremental-basedir
 #
-if [ "${BKP_TYPE}" = "incr" ]; then
-  if [ -n "${INC_BSEDIR}" ]; then
-    if [ ! -d ${WORK_DIR}/bkps/${INC_BSEDIR} ]; then
+if [[ "${BKP_TYPE}" = "incr" ]]; then
+  if [[ -n "${INC_BSEDIR}" ]]; then
+    if [[ ! -d ${WORK_DIR}/bkps/${INC_BSEDIR} ]]; then
       _d_inf "ERROR: Specified incremental basedir ${WORK_DIR}/bkps/${_inc_basedir} does not exist.";
     fi
@@ -475,15 +475,15 @@ if [ "${BKP_TYPE}" = "incr" ]; then
     _inc_basedir=${_last_bkp}
   fi
 
-  if [ ! -n "${_inc_basedir}" ]; then
+  if [[ ! -n "${_inc_basedir}" ]]; then
     _d_inf "ERROR: No valid incremental basedir found!";
   fi
 
-  ( [ "x${APPLY_LOG}" = "x1" ] || [ "x${STOR_CMP}" = "x1" ] ) && \
+  ( [[ "x${APPLY_LOG}" = "x1" ]] || [[ "x${STOR_CMP}" = "x1" ]] ) && \
     _inc_basedir_path="${WORK_DIR}/bkps/${_inc_basedir}" || \
     _inc_basedir_path="${STOR_DIR}/bkps/${_inc_basedir}"
 
-  if [ ! -d "${_inc_basedir_path}" ]; then
+  if [[ ! -d "${_inc_basedir_path}" ]]; then
     _d_inf "ERROR: Incremental basedir ${_inc_basedir_path} does not exist.";
   fi
 
@@ -496,7 +496,7 @@ else
 fi
 
 # Check for work directory
-if [ ! -d ${WORK_DIR} ]; then _d_inf "ERROR: XtraBackup work directory does not exist"; fi
+if [[ ! -d ${WORK_DIR} ]]; then _d_inf "ERROR: XtraBackup work directory does not exist"; fi
 
 DATASIZE=$(_du_r ${DATADIR})
 DISKSPCE=$(_df ${WORK_DIR})
@@ -504,7 +504,7 @@ HASSPACE=$(echo "${DATASIZE} ${DISKSPCE}"|awk '{if($1 < $2) {print 1} else {prin
 NOSPACE=0
 
 _echo "INFO: Checking disk space ... (data: ${DATASIZE}) (disk: ${DISKSPCE})"
-[ "${HASSPACE}" -eq "${NOSPACE}" ] && \
+[[ "${HASSPACE}" -eq "${NOSPACE}" ]] && \
   _d_inf "ERROR: Insufficient space on backup directory!"
 
 echo
@@ -525,16 +525,16 @@ _s_inf "INFO: Xtrabackup finished: ${_end_backup_date}"
 echo
 
 # Check the exit status from innobackupex, but dont exit right away if it failed
-if [ "${RETVAR}" -gt 0 ]; then
+if [[ "${RETVAR}" -gt 0 ]]; then
   _d_inf "ERROR: non-zero exit status of xtrabackup during backup. \
Something may have failed!";
 fi
 
-if [ ${COPY_BINLOGS} -eq 1 ]; then
+if [[ ${COPY_BINLOGS} -eq 1 ]]; then
 # Sync the binary logs to local stor first.
echo _echo "INFO: Syncing binary log snapshots" -if [ -n "${_last_bkp}" ]; then +if [[ -n "${_last_bkp}" ]]; then _first_bkp_since=$(_sql_first_backup_elapsed) > ${WORK_DIR}/bkps/binlog.index @@ -543,15 +543,15 @@ if [ -n "${_last_bkp}" ]; then echo $(basename ${f}) >> ${WORK_DIR}/bkps/binlog.index done - if [ "${STOR_CMP}" = 1 ]; then - if [ -f "${STOR_DIR}/bkps/${_last_bkp}-xtrabackup_binlog_info.log" ]; then + if [[ "${STOR_CMP}" = 1 ]]; then + if [[ -f "${STOR_DIR}/bkps/${_last_bkp}-xtrabackup_binlog_info.log" ]]; then _xbinlog_info=${STOR_DIR}/bkps/${_last_bkp}-xtrabackup_binlog_info.log - elif [ -f "${STOR_DIR}/bkps/${_last_bkp}/xtrabackup_binlog_info" ]; then + elif [[ -f "${STOR_DIR}/bkps/${_last_bkp}/xtrabackup_binlog_info" ]]; then _xbinlog_info=${STOR_DIR}/bkps/${_last_bkp}/xtrabackup_binlog_info else _xbinlog_info= fi - elif [ -f "${STOR_DIR}/bkps/${_last_bkp}/xtrabackup_binlog_info" ]; then + elif [[ -f "${STOR_DIR}/bkps/${_last_bkp}/xtrabackup_binlog_info" ]]; then _xbinlog_info=${STOR_DIR}/bkps/${_last_bkp}/xtrabackup_binlog_info else _xbinlog_info= @@ -567,16 +567,16 @@ if [ -n "${_last_bkp}" ]; then cd ${BNLGDIR} for f in $(grep -A $(cat ${WORK_DIR}/bkps/binlog.index|wc -l) "${_last_binlog}" ${WORK_DIR}/bkps/binlog.index); do - if [ "${STOR_CMP}" = 1 ]; then - [ -f "${_this_stor}/bnlg/${f}.tar.gz" ] && rm -rf "${_this_stor}/bnlg/${f}.tar.gz" + if [[ "${STOR_CMP}" = 1 ]]; then + [[ -f "${_this_stor}/bnlg/${f}.tar.gz" ]] && rm -rf "${_this_stor}/bnlg/${f}.tar.gz" tar czvf "${_this_stor}/bnlg/${f}.tar.gz" ${f} else - [ -f "${_this_stor}/bnlg/${f}" ] && rm -rf "${_this_stor}/bnlg/${f}" + [[ -f "${_this_stor}/bnlg/${f}" ]] && rm -rf "${_this_stor}/bnlg/${f}" cp -v ${f} "${_this_stor}/bnlg/" fi done - if [ -f "${_this_stor}/bnlg/${BNLGFMT}.index" ]; then rm -rf "${_this_stor}/bnlg/${BNLGFMT}.index"; fi + if [[ -f "${_this_stor}/bnlg/${BNLGFMT}.index" ]]; then rm -rf "${_this_stor}/bnlg/${BNLGFMT}.index"; fi cp ${BNLGFMT}.index ${_this_stor}/bnlg/${BNLGFMT}.index cd ${WORK_DIR}/bkps/ fi @@ -592,27 +592,27 @@ fi # Create copies of the backup if STOR_DIR and RMTE_DIR+RMTE_SSH is # specified. # -if [ -n "${STOR_DIR}" ]; then +if [[ -n "${STOR_DIR}" ]]; then echo _echo "INFO: Copying to immediate storage ${STOR_DIR}/bkps/" - if [ "${STOR_CMP}" = 1 ]; then + if [[ "${STOR_CMP}" = 1 ]]; then tar czvf ${STOR_DIR}/bkps/${CURDATE}.tar.gz ${CURDATE} ret=$? - [ -f ${_this_bkp}/xtrabackup_binlog_info ] \ + [[ -f ${_this_bkp}/xtrabackup_binlog_info ]] \ && cp ${_this_bkp}/xtrabackup_binlog_info ${STOR_DIR}/bkps/${CURDATE}-xtrabackup_binlog_info.log else cp -r ${_this_bkp}* ${STOR_DIR}/bkps/ ret=$? fi - if [ "x${ret}" != "x0" ]; then + if [[ "x${ret}" != "x0" ]]; then _s_inf "WARNING: Failed to copy ${_this_bkp} to ${STOR_DIR}/bkps/" _s_inf " I will not be able to delete old backups from your WORK_DIR"; # Delete backup on work dir if no apply log is needed - elif [ "x${APPLY_LOG}" = "x0" ]; then + elif [[ "x${APPLY_LOG}" = "x0" ]]; then _echo "INFO: Cleaning up ${WORK_DIR}/bkps/" cd ${WORK_DIR}/bkps/ - if [ "x${STOR_CMP}" != "x1" ]; then + if [[ "x${STOR_CMP}" != "x1" ]]; then _rxp="${CURDATE}[-info]?+.log" else _rxp="${CURDATE}[-info.log]?" 
@@ -621,10 +621,10 @@ if [ -n "${STOR_DIR}" ]; then
     ls | grep -Ev "${_rxp}"
     for f in $(ls | grep -Ev ${_rxp}); do rm -rf ${f}; done
   # We also delete the previous incremental if the backup has been successful
-  elif [ "${BKP_TYPE}" = "incr" ]; then
+  elif [[ "${BKP_TYPE}" = "incr" ]]; then
     _echo "INFO: Deleting previous incremental ${WORK_DIR}/bkps/${_inc_basedir}"
     rm -rf ${WORK_DIR}/bkps/${_inc_basedir}*;
-  elif [ "${BKP_TYPE}" = "full" ]; then
+  elif [[ "${BKP_TYPE}" = "full" ]]; then
     _echo "INFO: Deleting previous work backups $(find ${WORK_DIR}/bkps/ -maxdepth 1 -mindepth 1|grep -v ${CURDATE}|xargs)"
     rm -rf $(find ${WORK_DIR}/bkps/ -maxdepth 1 -mindepth 1|grep -v ${CURDATE}|xargs)
 fi
@@ -635,11 +635,11 @@ if [[ -n "${RMTE_DIR}" && -n "${RMTE_SSH}" ]]; then
   echo
   _echo "INFO: Syncing backup sets to remote ${RMTE_SSH}:${RMTE_DIR}/"
   rsync -avzp --delete -e ssh ${STOR_DIR}/ ${RMTE_SSH}:${RMTE_DIR}/
-  if [ "$?" -gt 0 ]; then _s_inf "WARNING: Failed to sync ${STOR_DIR} to ${RMTE_SSH}:${RMTE_DIR}/"; fi
+  if [[ "$?" -gt 0 ]]; then _s_inf "WARNING: Failed to sync ${STOR_DIR} to ${RMTE_SSH}:${RMTE_DIR}/"; fi
   _echo " ... done"
 fi
 
-if [ "${BKP_TYPE}" = "incr" ]; then
+if [[ "${BKP_TYPE}" = "incr" ]]; then
   set -- $(_sql_incr_bsedir ${_week_no})
   _incr_base=$1
   _incr_baseid=$2
@@ -650,22 +650,22 @@ else
 fi
 
 # Start, whether apply log is enabled
-if [ "${APPLY_LOG}" = 1 ]; then
+if [[ "${APPLY_LOG}" = 1 ]]; then
 
-if [ -n "${USE_MEMORY}" ]; then _ibx_prep="${_ibx} --use-memory=${USE_MEMORY}"; fi
+if [[ -n "${USE_MEMORY}" ]]; then _ibx_prep="${_ibx} --use-memory=${USE_MEMORY}"; fi
 
-if [ "${status}" != 1 ]; then
+if [[ "${status}" != 1 ]]; then
   _start_prepare_date=$(date)
   _s_inf "INFO: Apply log started: ${_start_prepare_date}"
 
-  if [ "${BKP_TYPE}" = "incr" ]; then
-    if [ ! -n "${_incr_base}" ]; then
+  if [[ "${BKP_TYPE}" = "incr" ]]; then
+    if [[ ! -n "${_incr_base}" ]]; then
       _d_inf "ERROR: No valid base backup found!";
     fi
 
    _incr_base=P_${_incr_base}
-    if [ ! -d "${WORK_DIR}/bkps/${_incr_base}" ]; then
+    if [[ ! -d "${WORK_DIR}/bkps/${_incr_base}" ]]; then
      _d_inf "ERROR: Base backup ${WORK_DIR}/bkps/${_incr_base} does not exist.";
     fi
     _ibx_prep="${_ibx_prep} --apply-log --redo-only ${WORK_DIR}/bkps/${_incr_base} --incremental-dir ${_this_bkp}"
@@ -676,7 +676,7 @@ if [ "${status}" != 1 ]; then
     # Check to make sure we have enough disk space to make a copy
     _bu_size=$(_du_r ${_this_bkp})
     _du_left=$(_df ${WORK_DIR})
-    if [ "${_bu_size}" -gt "${_du_left}" ]; then
+    if [[ "${_bu_size}" -gt "${_du_left}" ]]; then
       _d_inf "ERROR: Apply to copy was specified, however there is not \
enough disk space left on device.";
     else
@@ -699,11 +699,11 @@
 echo
 
 # Check the exit status from innobackupex, but dont exit right
 # away if it failed
-if [ "${RETVAR}" -gt 0 ]; then
+if [[ "${RETVAR}" -gt 0 ]]; then
   _s_inf "ERROR: non-zero exit status of xtrabackup during --apply-log. \
Something may have failed! Please prepare, I have not deleted the \
new backup directory.";
-elif [ "x${STOR_CMP}" != "x1" ]; then
+elif [[ "x${STOR_CMP}" != "x1" ]]; then
   rm -rf ${_this_bkp}
 fi
@@ -711,17 +711,17 @@ fi
 fi
 
 _started_at="STR_TO_DATE('${CURDATE}','%Y-%m-%d_%H_%i_%s')"
-if [ "${APPLY_LOG}" = 1 ]; then
+if [[ "${APPLY_LOG}" = 1 ]]; then
   _ends_at=$(date -d "${_end_prepare_date}" "+%Y-%m-%d %H:%M:%S")
 else
   _ends_at=$(date -d "${_end_backup_date}" "+%Y-%m-%d %H:%M:%S")
 fi
-if [ "${BKP_TYPE}" = "incr" ]; then
+if [[ "${BKP_TYPE}" = "incr" ]]; then
   _incr_basedir="STR_TO_DATE('${_incr_basedir}','%Y-%m-%d_%H_%i_%s')"
 else
   _incr_basedir="NULL"
 fi
-[ -d "${_this_bkp}" ] && _bu_size=$(_du_h ${_this_bkp}) || _bu_size=$(_du_h ${_this_bkp_stored})
+[[ -d "${_this_bkp}" ]] && _bu_size=$(_du_h ${_this_bkp}) || _bu_size=$(_du_h ${_this_bkp_stored})
 _du_left=$(_df_h ${WORK_DIR})
 
 _sql_save_bkp "${_started_at}" "${_ends_at}" "${_bu_size}" \
@@ -732,9 +732,9 @@ _echo "INFO: Cleaning up previous backup files:"
 # Depending on how many sets to keep, we query the backups table.
 # Find the ids of base backups first.
 _prune_base=$(_sql_prune_base)
-if [ -n "${_prune_base}" ]; then
+if [[ -n "${_prune_base}" ]]; then
   _prune_list=$(_sql_prune_list ${_prune_base})
-  if [ -n "${_prune_list}" ]; then
+  if [[ -n "${_prune_list}" ]]; then
     _echo "INFO: Deleting backups: ${_prune_list}"
     _sql_prune_rows ${_prune_base}
     cd ${STOR_DIR}/bkps && rm -rf ${_prune_list}
@@ -751,7 +751,7 @@ echo
 _s_inf "INFO: Backup size: ${_bu_size}"
 _s_inf "INFO: Remaining space available on backup device: ${_du_left}"
 _s_inf "INFO: Logfile: ${LOG_FILE}"
-[ "x${APPLY_LOG}" = "x1" ] && \
+[[ "x${APPLY_LOG}" = "x1" ]] && \
   _s_inf "INFO: Last full backup fully prepared (including incrementals): ${_last_full_prep}"
 cp ${INF_FILE_WORK} ${INF_FILE_STOR}
 echo
diff --git a/docker/mysql-xtrabackup/xrecovery.sh b/docker/mysql-xtrabackup/xrecovery.sh
index 3897033c..f6ed7545 100644
--- a/docker/mysql-xtrabackup/xrecovery.sh
+++ b/docker/mysql-xtrabackup/xrecovery.sh
@@ -37,18 +37,18 @@ done
 
 rm -rf ${WORKDIR} chain-search.txt chain.txt
 
-if [ ! -f ${CURLOG}.tar.gz ]; then
+if [[ ! -f ${CURLOG}.tar.gz ]]; then
   echo recovery: invalid starting point ${CURLOG}.tar.gz
   exit 1
 fi
 
 # do-while
 while : ; do
-  if [ ! -f ${CURLOG}-info.log ]; then
+  if [[ ! -f ${CURLOG}-info.log ]]; then
    echo recovery: cannot find target log ${CURLOG}-info.log
    exit 1
   fi
-  if [ ! -f ${CURLOG}.tar.gz ]; then
+  if [[ ! -f ${CURLOG}.tar.gz ]]; then
    echo recovery: cannot find target archive ${CURLOG}.tar.gz
    exit 1
   fi
@@ -84,26 +84,26 @@ MAXLINE=$(cat chain.txt | wc -l)
 while read line; do
   rm -rf ${line}
   tar -zxf ${line}.tar.gz
-  if [ ${MAXLINE} -eq 1 ]; then
+  if [[ ${MAXLINE} -eq 1 ]]; then
     echo recovery: process single backup ${line}
     xtrabackup --use-memory ${USE_MEMORY} --prepare --target-dir=${line}
-    if [ $? -ne 0 ]; then
+    if [[ $? -ne 0 ]]; then
       echo recovery: backup preparation failed! not deleting workdir
       exit 1
     fi
     mv ${line} ${WORKDIR}
-  elif [ ${CURLINE} -eq 1 ]; then
+  elif [[ ${CURLINE} -eq 1 ]]; then
     echo recovery: obtain full backup ${line}
     xtrabackup --use-memory ${USE_MEMORY} --prepare --apply-log-only --target-dir=${line}
-    if [ $? -ne 0 ]; then
+    if [[ $? -ne 0 ]]; then
      echo recovery: initial backup preparation failed! not deleting workdir
      exit 1
     fi
     mv ${line} ${WORKDIR}
-  elif [ ${CURLINE} -lt ${MAXLINE} ]; then
+  elif [[ ${CURLINE} -lt ${MAXLINE} ]]; then
     echo recovery: apply intermediate incremental ${line}
     xtrabackup --use-memory ${USE_MEMORY} --prepare --apply-log-only --target-dir=${WORKDIR} --incremental-dir=$(pwd)/${line}
-    if [ $? -ne 0 ]; then
+    if [[ $? -ne 0 ]]; then
       echo recovery: intermediate recovery failed! not deleting workdir
       exit 1
     fi
@@ -111,7 +111,7 @@
   else
     echo recovery: apply final incremental ${line}
     xtrabackup --use-memory ${USE_MEMORY} --prepare --target-dir=${WORKDIR} --incremental-dir=${line}
-    if [ $? -ne 0 ]; then
+    if [[ $? -ne 0 ]]; then
       echo recovery: final incremental failed! not deleting workdir
       exit 1
     fi
diff --git a/packages/express/ami/ami-rekey.sh b/packages/express/ami/ami-rekey.sh
index 29ff3629..0bece0d8 100644
--- a/packages/express/ami/ami-rekey.sh
+++ b/packages/express/ami/ami-rekey.sh
@@ -27,7 +27,7 @@ case "$1" in
     ;;
 esac
 
-if [ -f /etc/appliance-unlocked ]; then
+if [[ -f /etc/appliance-unlocked ]]; then
   # only once
   exit 0
 fi
diff --git a/packages/launchpad/vm/vm-rekey.sh b/packages/launchpad/vm/vm-rekey.sh
index dbc9712e..d8fe354f 100644
--- a/packages/launchpad/vm/vm-rekey.sh
+++ b/packages/launchpad/vm/vm-rekey.sh
@@ -27,7 +27,7 @@ case "$1" in
     ;;
 esac
 
-if [ -f /etc/appliance-unlocked ]; then
+if [[ -f /etc/appliance-unlocked ]]; then
   # only once
   exit 0
 fi
diff --git a/packages/lightsail/duplicity/backup.sh b/packages/lightsail/duplicity/backup.sh
index ce19d7f4..3ceeee04 100644
--- a/packages/lightsail/duplicity/backup.sh
+++ b/packages/lightsail/duplicity/backup.sh
@@ -4,7 +4,7 @@ if [[ $(dpkg --print-architecture) != arm64 ]]; then
   docker exec $(docker ps | grep mysql | cut -f 1 -d " ") /root/xbackup-wrapper.sh
 fi
 
-if [ -f /root/cloud-backups-enabled ]; then
+if [[ -f /root/cloud-backups-enabled ]]; then
   S3=$(cat /root/.cloud-s3.txt)
   KMS=$(cat /root/.cloud-kms.txt)
   SQLMOUNT=$(docker volume inspect lightsail_sqlbackup | jq -r ".[0].Mountpoint")
diff --git a/packages/lightsail/duplicity/restore.sh b/packages/lightsail/duplicity/restore.sh
index 6a4afc0c..318468ef 100644
--- a/packages/lightsail/duplicity/restore.sh
+++ b/packages/lightsail/duplicity/restore.sh
@@ -23,14 +23,14 @@ fi
 rm -rf $(docker volume inspect lightsail_sqlbackup | jq -r ".[0].Mountpoint")/*
 rm -rf $(docker volume inspect lightsail_sitevolume | jq -r ".[0].Mountpoint")/*
 
-if [ -f /root/recovery-restore-required ]; then
+if [[ -f /root/recovery-restore-required ]]; then
   source /root/cloud-variables
   S3=${RECOVERYS3}
   KMS=${RECOVERYKMS}
   PASSPHRASE=$(aws s3 cp s3://${S3}/Backup/passphrase.txt - --sse aws:kms --sse-kms-key-id ${KMS})
   export PASSPHRASE
   duplicity --force boto3+s3://${S3}/Backup /
-elif [ -f /root/cloud-backups-enabled ]; then
+elif [[ -f /root/cloud-backups-enabled ]]; then
   S3=$(cat /root/.cloud-s3.txt)
   KMS=$(cat /root/.cloud-kms.txt)
   PASSPHRASE=$(aws s3 cp s3://${S3}/Backup/passphrase.txt - --sse aws:kms --sse-kms-key-id ${KMS})
diff --git a/packages/standard/ami/ami-configure.sh b/packages/standard/ami/ami-configure.sh
index 0ad0a93a..be06431b 100755
--- a/packages/standard/ami/ami-configure.sh
+++ b/packages/standard/ami/ami-configure.sh
@@ -34,7 +34,7 @@ rm /tmp/mypass
 ln -s /root/openemr-devops/packages/standard/scripts/restore.sh /root/restore.sh
 
 cd /root/openemr-devops/packages/standard
-if [ -z "${RECOVERYS3}" ]; then
+if [[ -z "${RECOVERYS3}" ]]; then
   # configure, but do not launch, OpenEMR docker
   docker-compose create
   # now we'll install the AWS certs we got when I built the instance
diff --git a/utilities/openemr-env-installer/openemr-env-installer b/utilities/openemr-env-installer/openemr-env-installer
index 14ab6c7f..232025de 100755
--- a/utilities/openemr-env-installer/openemr-env-installer
+++ b/utilities/openemr-env-installer/openemr-env-installer
@@ -14,7 +14,7 @@ github_account=$2 # github account
 # Installation: git --> docker --> docker-compose --> openemr-cmd --> minikube
 # Run as root for centos/rhel/fedora
 script_run_as_root(){
-  if [ "${UID}" -ne 0 ]; then
+  if [[ "${UID}" -ne 0 ]]; then
     echo 'Please run with the root user.'
     exit
   fi
@@ -44,7 +44,7 @@
       dnf install git -y
       ;;
   esac
-  [ $? -ne 0 ] && echo && echo -e "\e[31mInstalled failed, please check the repo list and the network.\e[0m" && exit
+  [[ $? -ne 0 ]] && echo && echo -e "\e[31mInstalled failed, please check the repo list and the network.\e[0m" && exit
   echo
 }
 
@@ -70,12 +70,12 @@ installer_script_usage() {
 git_clone_function(){
   code_location=$1
   github_account=$2
-  cd ${code_location} && [ $(pwd) != "${code_location}" ] && cd ${code_location}
+  cd ${code_location} && [[ $(pwd) != "${code_location}" ]] && cd ${code_location}
   echo -e "\033[30m===>\033[0m \033[32mDownloading OpenEMR repository... \033[0m"
   echo
   # For openemr repos
   git clone https://github.com/${github_account}/openemr.git
-  cd ${code_location}/openemr && [ $(pwd) != "${code_location}/openemr" ] && cd ${code_location}/openemr
+  cd ${code_location}/openemr && [[ $(pwd) != "${code_location}/openemr" ]] && cd ${code_location}/openemr
   git remote add upstream https://github.com/openemr/openemr.git
   git fetch upstream
   echo
@@ -85,11 +85,11 @@ git_clone_function(){
   echo
 
   # For openemr-devops repos
-  cd ${code_location} && [ $(pwd) != "${code_location}" ] && cd ${code_location}
+  cd ${code_location} && [[ $(pwd) != "${code_location}" ]] && cd ${code_location}
   echo -e "\033[30m===>\033[0m \033[32mDownloading OpenEMR-devops repository... \033[0m"
   echo
   git clone https://github.com/${github_account}/openemr-devops.git
-  cd ${code_location}/openemr-devops && [ $(pwd) != "${code_location}/openemr-devops" ] && cd ${code_location}/openemr-devops
+  cd ${code_location}/openemr-devops && [[ $(pwd) != "${code_location}/openemr-devops" ]] && cd ${code_location}/openemr-devops
   git remote add upstream https://github.com/openemr/openemr-devops.git
   git fetch upstream
   echo
@@ -104,13 +104,13 @@ code_dir_exist_or_not() {
   code_location=$1
   github_account=$2
   # Create the code location if not exist
-  if [ ! -d ${code_location} ]; then
+  if [[ ! -d ${code_location} ]]; then
     mkdir -p ${code_location}
     git_clone_function $1 $2
   # It already downloaded if openemr dir exsit
-  elif [ ! -d ${code_location}/openemr ]; then
+  elif [[ ! -d ${code_location}/openemr ]]; then
     git_clone_function $1 $2
-  elif [ ! -d ${code_location}/openemr-devops ]; then
+  elif [[ ! -d ${code_location}/openemr-devops ]]; then
     git_clone_function $1 $2
   else
     echo -e "\033[30m===>\033[0m \033[32mOpenEMR repos are already downloaded. \033[0m"
@@ -134,30 +134,30 @@ rhel_register_check_and_enable_repo() {
   register_lock=/tmp/openemr-register-lock
   repo_lock=/tmp/openemr-repo-lock
   # Due to subscription-manager check very slow, so add a lock file to check
-  if [ ! -f ${register_lock} ]; then
+  if [[ ! -f ${register_lock} ]]; then
     subscription-manager status | grep Current &>/dev/null
-    if [ $? -eq 0 ]; then
+    if [[ $? -eq 0 ]]; then
       echo 0 > ${register_lock}
     else
       echo 1 > ${register_lock}
       register_note
     fi
-  elif [ -f ${register_lock} ]; then
-    if [ "$(cat /tmp/openemr-register-lock)" = "1" ]; then
+  elif [[ -f ${register_lock} ]]; then
+    if [[ "$(cat /tmp/openemr-register-lock)" = "1" ]]; then
      register_note
     fi
   fi
 
   # Attach the necessary repo
-  if [ ! -f ${repo_lock} ]; then
+  if [[ ! -f ${repo_lock} ]]; then
     echo -e "\033[30m===>\033[0m \033[32mEnabling base and extras repo... \033[0m"
     echo
     subscription-manager repos --enable rhel-7-server-rpms &>/dev/null
-    [ $? -eq 0 ] && echo 1 > ${repo_lock}
+    [[ $? -eq 0 ]] && echo 1 > ${repo_lock}
     subscription-manager repos --enable rhel-7-server-extras-rpms &>/dev/null
-    [ $? -eq 0 ] && echo 2 >> ${repo_lock}
-  elif [ -f ${repo_lock} ]; then
-    if [ "$(cat /tmp/openemr-repo-lock|wc -l)" = "2" ]; then
+    [[ $? -eq 0 ]] && echo 2 >> ${repo_lock}
+  elif [[ -f ${repo_lock} ]]; then
+    if [[ "$(cat /tmp/openemr-repo-lock|wc -l)" = "2" ]]; then
       echo -e "\033[30m===>\033[0m \033[32mBoth of base and extras repo are already enabled. \033[0m"
       echo
     fi
@@ -175,7 +175,7 @@ install_docker() {
   os_distribution=$(grep ^HOME_URL /etc/os-release| awk -F'[/.]' '{print $(NF-2)}')
   # Check docker service status if do not install or not startup
   sudo systemctl start docker &>/dev/null
-  if [ $? -ne 0 ]; then
+  if [[ $? -ne 0 ]]; then
     case "${os_distribution}" in
       ubuntu|debian)
        echo -e "\033[30m===>\033[0m \033[32mUpdate the apt package index and install packages to allow apt to use a repository over HTTPS... \033[0m"
@@ -254,7 +254,7 @@
 # Install openemr-cmd tool
 install_openemr_cmd() {
   os_type=$(uname)
-  if [ "${os_type}" = "Linux" ]; then
+  if [[ "${os_type}" = "Linux" ]]; then
     os_distribution=$(grep ^HOME_URL /etc/os-release| awk -F'[/.]' '{print $(NF-2)}')
   else
     os_distribution=$(uname)
   fi
@@ -321,9 +321,9 @@ install_conntrack() {
 # Install minikube and kubectl
 install_minikube_kubectl(){
   # Judge the os type e.g. Linux or MacOS
-  if [ "${os_type}" = "Linux" ]; then
+  if [[ "${os_type}" = "Linux" ]]; then
     minikube_os=linux
-  elif [ "${os_type}" = "Darwin" ]; then
+  elif [[ "${os_type}" = "Darwin" ]]; then
     minikube_os=darwin
   fi
   # Install minikube
@@ -400,50 +400,50 @@ startup_the_env() {
 # The main logic
 # Judge the os type e.g. Linux or MacOS
-if [ "${os_type}" = "Linux" ]; then
+if [[ "${os_type}" = "Linux" ]]; then
   major_release=$(grep VERSION_ID /etc/os-release | awk -F'[.]' '{print $1}' | awk -F'["]' '{print $2}')
   os_distribution=$(grep ^HOME_URL /etc/os-release| awk -F'[/.]' '{print $(NF-2)}')
   case "${os_distribution}" in
     ubuntu)
-      if [ "${major_release}" -lt "16" ]; then
+      if [[ "${major_release}" -lt "16" ]]; then
         echo 'The script only supports ubuntu 16.04 or later.'
         exit
       fi
-      [ $# -ne 2 ] && installer_script_usage
+      [[ $# -ne 2 ]] && installer_script_usage
       ;;
     debian)
-      if [ "${major_release}" -lt "9" ] || [ "${major_release}" -gt "10" ]; then
+      if [[ "${major_release}" -lt "9" ]] || [[ "${major_release}" -gt "10" ]]; then
        echo 'The script only supports debian 9 or debian 10.'
        exit
       fi
-      [ $# -ne 2 ] && installer_script_usage
+      [[ $# -ne 2 ]] && installer_script_usage
       ;;
     fedoraproject)
      # Due to the different format, so get the keyword again in fedora
      major_release=$(grep VERSION_ID /etc/os-release| awk -F'=' '{print $2}')
-      if [ "${major_release}" -lt "30" ] || [ "${major_release}" -gt "31" ]; then
+      if [[ "${major_release}" -lt "30" ]] || [[ "${major_release}" -gt "31" ]]; then
        echo 'The script only supports fedora30 and fedora31.'
        exit
       fi
      script_run_as_root
-      [ $# -ne 2 ] && installer_script_usage
+      [[ $# -ne 2 ]] && installer_script_usage
       ;;
     redhat)
-      if [ "${major_release}" != "7" ]; then
+      if [[ "${major_release}" != "7" ]]; then
        echo 'The script only supports rhel7.'
        exit
       fi
      script_run_as_root
-      [ $# -ne 2 ] && installer_script_usage
+      [[ $# -ne 2 ]] && installer_script_usage
      rhel_register_check_and_enable_repo
       ;;
     centos)
-      if [ "${major_release}" != "7" ]; then
+      if [[ "${major_release}" != "7" ]]; then
        echo 'The script only supports centos7.'
        exit
       fi
      script_run_as_root
-      [ $# -ne 2 ] && installer_script_usage
+      [[ $# -ne 2 ]] && installer_script_usage
       ;;
   esac
   install_git
@@ -455,17 +455,17 @@ if [ "${os_type}" = "Linux" ]; then
   install_minikube_kubectl
   quick_check_result
   startup_the_env $1
-elif [ "${os_type}" = "Darwin" ]; then
+elif [[ "${os_type}" = "Darwin" ]]; then
   # For MacOS
   # Check the macOS version
   major_release=$(sw_vers -productVersion| awk -F'.' '{print $1}')
-  if [ "${major_release}" -lt "10" ]; then
+  if [[ "${major_release}" -lt "10" ]]; then
     echo 'The script only supports macOS10.13 and later.'
     exit
-  elif [ "${major_release}" -eq "10" ]; then
+  elif [[ "${major_release}" -eq "10" ]]; then
     minor_release=$(sw_vers -productVersion| awk -F'.' '{print $2}')
-    if [ "${minor_release}" -lt "13" ]; then
+    if [[ "${minor_release}" -lt "13" ]]; then
      echo 'The script only supports macOS10.13 and later.'
      exit
     fi
 
@@ -473,7 +473,7 @@ elif [ "${os_type}" = "Darwin" ]; then
 
   # Script usage
-  [ $# -ne 2 ] && installer_script_usage
+  [[ $# -ne 2 ]] && installer_script_usage
 
   # Install brew
   # Homebrew info: https://brew.sh
@@ -493,7 +493,7 @@ elif [ "${os_type}" = "Darwin" ]; then
   else
     echo -e "\033[30m===>\033[0m \033[32mInstalling git... \033[0m"
     brew install git
-    [ $? -ne 0 ] && echo && echo -e "\e[31mInstalled failed, please check the network.\e[0m" && exit
+    [[ $? -ne 0 ]] && echo && echo -e "\e[31mInstalled failed, please check the network.\e[0m" && exit
     echo
   fi
 
@@ -515,7 +515,7 @@ elif [ "${os_type}" = "Darwin" ]; then
   while true
   do
     read -p "Please enter [yes|y] to continue if you already permitted the docker access: " CONFIRM_PERMIT
-    if [ "${CONFIRM_PERMIT}" = "yes" ] || [ "${CONFIRM_PERMIT}" = "y" ]; then
+    if [[ "${CONFIRM_PERMIT}" = "yes" ]] || [[ "${CONFIRM_PERMIT}" = "y" ]]; then
       break
     fi
   done
diff --git a/utilities/openemr-env-migrator/openemr-env-migrator b/utilities/openemr-env-migrator/openemr-env-migrator
index 411356db..ab698b40 100755
--- a/utilities/openemr-env-migrator/openemr-env-migrator
+++ b/utilities/openemr-env-migrator/openemr-env-migrator
@@ -7,7 +7,7 @@ first_arg=$1
 target_dir_local_check() {
   migrate_target_dir=$1
   echo -e "\033[30m===>\033[0m \033[32mChecking the target dir... \033[0m"
-  [ ! -d "${migrate_target_dir}" ] && mkdir ${migrate_target_dir} -p
+  [[ ! -d "${migrate_target_dir}" ]] && mkdir ${migrate_target_dir} -p
   echo
 }
 
@@ -68,7 +68,7 @@ set_daemon_json_file() {
   migrate_target_dir=$1
   echo -e "\033[30m===>\033[0m \033[32mSetting daemon.json file... \033[0m"
   echo
-  if [ ! -f /etc/docker/daemon.json ]; then
+  if [[ ! -f /etc/docker/daemon.json ]]; then
     cat << EOF > /etc/docker/daemon.json
 {
   "data-root": "${migrate_target_dir}"
@@ -76,7 +76,7 @@ set_daemon_json_file() {
 EOF
   else
     grep data-root /etc/docker/daemon.json &>/dev/null
-    if [ $? -ne 0 ]; then
+    if [[ $? -ne 0 ]]; then
       sed -i '$d' /etc/docker/daemon.json
       echo ' "data-root": "migrate"' >> /etc/docker/daemon.json
       sed -i "s#\"data-root\"\:\ \"migrate\"#\"data-root\"\: \"${migrate_target_dir}\"#g" /etc/docker/daemon.json
@@ -156,7 +156,7 @@ script_usage() {
   exit
 }
 
-if [ "${first_arg}" != "-t" ]; then
+if [[ "${first_arg}" != "-t" ]]; then
   script_usage
 fi
 
@@ -204,23 +204,23 @@ done
 
 # Main logic
 # Migrate from one data dir to another dir in local
-if [ "${migrate_type}" = 'local' ]; then
+if [[ "${migrate_type}" = 'local' ]]; then
   target_dir_local_check ${migrate_target_dir}
   stop_all_containers
   sync_data_dir_in_local ${migrate_source_dir} ${migrate_target_dir}
   set_daemon_json_file ${migrate_target_dir}
   start_all_containers
 # Migrate data dir from one host to remote host(first step)
-elif [ "${migrate_type}" = 'remote' ]; then
+elif [[ "${migrate_type}" = 'remote' ]]; then
   target_dir_remote_check ${target_ssh_user} ${target_ip} ${migrate_target_dir}
   stop_all_containers
   sync_data_dir_to_remote ${migrate_source_dir} ${target_ssh_user} ${target_ip} ${migrate_target_dir}
 # Second step
-elif [ "${migrate_type}" = 'set' ]; then
+elif [[ "${migrate_type}" = 'set' ]]; then
   set_daemon_json_file ${migrate_target_dir}
   start_all_containers
 # Migrate the single container
-elif [ "${migrate_type}" = 'single' ]; then
+elif [[ "${migrate_type}" = 'single' ]]; then
   commit_and_save_container ${migrate_container_name} ${container_name} ${target_ssh_user} ${target_ip}
 else
   echo -e "\033[0m\033[31mInvalid migrate type, e.g. local, remote, set, single. \033[0m"
diff --git a/utilities/openemr-monitor/monitor-installer b/utilities/openemr-monitor/monitor-installer
index ee6601f0..8a652918 100644
--- a/utilities/openemr-monitor/monitor-installer
+++ b/utilities/openemr-monitor/monitor-installer
@@ -17,14 +17,14 @@ if ! command -v docker &>/dev/null; then
 elif ! command -v docker-compose &>/dev/null; then
   echo 'Please check docker-compose install or not.'
   exit ${DOCKER_COMPOSE_CODE}
-elif [ $(ps aux|grep dockerd|grep -v grep|wc -l) -ne 1 ]; then
+elif [[ $(ps aux|grep dockerd|grep -v grep|wc -l) -ne 1 ]]; then
   echo "Please check doceker start or not."
   exit ${DOCKER_START}
 fi
 
 # Modify the ip and mail setting for the yml files
 ARG_CODE=13
-if [ $# -ne 6 ]; then
+if [[ $# -ne 6 ]]; then
   echo "Usage: bash $(basename $0) "
   echo 'e.g.'
   echo "bash $(basename $0) /home/openemr-monitor 192.168.2.111 smtp.gmail.com:587 monitor@gmail.com pass12 test@gmail.com"
   exit ${ARG_CODE}
@@ -32,7 +32,7 @@ fi
 
 # Install location
-[ ! -d "${installDir}" ] && mkdir ${installDir}/grafana/provisioning/{dashboards,datasources} -p
+[[ ! -d "${installDir}" ]] && mkdir ${installDir}/grafana/provisioning/{dashboards,datasources} -p
 mkdir ${installDir}/prometheus -p
 echo
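
A closing observation on the whole sweep: `[[ ... ]]` is a bashism, so every script touched here now hard-requires bash rather than a POSIX `/bin/sh` (dash aborts with `[[: not found`). The shebangs in these files already point at bash, so nothing breaks, but any future copy-paste into an `sh` context would. A defensive guard a script could carry, sketched here as an assumption rather than something this patch adds — note it deliberately uses plain `[`, since it must run before bash is guaranteed:

    #!/bin/bash
    # bail out early if we were invoked by a non-bash shell
    if [ -z "${BASH_VERSION:-}" ]; then
        echo "this script requires bash (it uses [[ ... ]])" >&2
        exit 1
    fi
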