Skip to content

Instantly share code, notes, and snippets.

@sandcastle
Last active April 5, 2025 02:34
Show Gist options
  • Save sandcastle/8e12b089be59c91dc78b747702908cc5 to your computer and use it in GitHub Desktop.
Useful shell helper functions and tests to show usage

Bash Script Testing

This directory contains test files for the bash utility scripts used throughout the project.

Test Structure

  • test.sh: Main test runner that executes all test files (located in this directory)
  • utils.test.sh: Tests for _utils.sh utility functions
  • Additional test files should be named after the script they test with a .test.sh suffix

Running Tests

# From the scripts directory:
./tests/test.sh

# From within the tests directory:
./test.sh

# Run a specific test file directly:
./utils.test.sh

Test File Structure

Each test file should:

  1. Source the utility functions from _utils.sh in the parent directory
  2. Use the test assertion functions (assert_equals, assert_true, assert_false, assert_contains)
  3. Create temporary test directories/files that are cleaned up on exit
  4. Group related tests with headers
  5. Return a non-zero exit code if any tests fail

Writing New Tests

When adding new utility functions or scripts, create corresponding test files that validate:

  1. Normal/expected behavior
  2. Edge cases and boundary conditions
  3. Error handling
  4. Input validation
  5. Cross-platform compatibility

Test Helpers

Test helper functions include:

  • assert_equals: Compare expected vs actual values
  • assert_contains: Check if a string contains a substring
  • assert_true: Verify a command returns true (exit code 0)
  • assert_false: Verify a command returns false (non-zero exit code)

Continuous Integration

These tests are run automatically as part of the CI/CD pipeline to ensure script quality.

#!/bin/bash
# -----------------------------
# TEXT FORMATTING + FEEDBACK
# -----------------------------
# Detect color support without forcing it: colors are enabled only when
# stdout is a terminal, $TERM looks color-capable, and NO_COLOR is unset.
_colors_enabled=0
if [[ -t 1 ]]; then
  case "$TERM" in
    xterm* | rxvt* | screen* | tmux* | vt100* | ansi | cygwin | linux | vscode*)
      _colors_enabled=1
      ;;
  esac
  # A "dumb" terminal occasionally still supports color when COLORTERM is set.
  if [[ "$TERM" == "dumb" && -n "$COLORTERM" ]]; then
    _colors_enabled=1
  fi
  # Honor the NO_COLOR convention (https://no-color.org): a non-empty value
  # disables colored output regardless of terminal capabilities.
  if [[ -n "$NO_COLOR" ]]; then
    _colors_enabled=0
  fi
fi
# ANSI color codes - simple variables for best compatibility
# (standard SGR foreground codes; 90 is the "bright black" grey variant)
_COLOR_BLACK=30
_COLOR_RED=31
_COLOR_GREEN=32
_COLOR_YELLOW=33
_COLOR_BLUE=34
_COLOR_MAGENTA=35
_COLOR_CYAN=36
_COLOR_WHITE=37
_COLOR_GREY=90
# Cross-platform echo with escape-sequence support.
# printf's %b interprets backslash escapes consistently across shells,
# unlike the non-portable `echo -e`.
_echo() {
  local joined="$*"
  printf "%b\n" "$joined"
}
# A simplified formatting function
# Usage: format_text [options] "text"
# Options:
# --color COLOR : Set text color (black|red|green|yellow|blue|magenta|cyan|white|grey)
# --bold : Make text bold
# --underline : Underline text
# --dim : Dim text
# --indent N : Indent text by 2*N spaces (default 0, i.e. no indent)
# --prefix CHAR : Add prefix character before text
# --prefix-color C : Set prefix color
# Colors/attributes are only emitted when _colors_enabled=1; otherwise the
# text is printed plain, so output is safe for pipes and logs.
format_text() {
# Default values
local color=""
local bold=0
local underline=0
local dim=0
local indent=0
local prefix=""
local prefix_color=""
local text=""
# Parse arguments
while [[ $# -gt 0 ]]; do
case "$1" in
--color)
color="$2"
shift 2
;;
--bold)
bold=1
shift
;;
--underline)
underline=1
shift
;;
--dim)
dim=1
shift
;;
--indent)
indent="$2"
shift 2
;;
--prefix)
prefix="$2"
shift 2
;;
--prefix-color)
prefix_color="$2"
shift 2
;;
*)
# Last argument is the text (a later positional argument overwrites an
# earlier one - only the final non-option argument is kept)
text="$1"
shift
;;
esac
done
# Build format string
local format=""
local color_code=""
local prefix_format=""
local indentation=""
# Apply color if enabled
if [[ "$_colors_enabled" -eq 1 ]]; then
# Get color code (unknown color names leave color_code empty = no color)
case "$color" in
black) color_code="$_COLOR_BLACK" ;;
red) color_code="$_COLOR_RED" ;;
green) color_code="$_COLOR_GREEN" ;;
yellow) color_code="$_COLOR_YELLOW" ;;
blue) color_code="$_COLOR_BLUE" ;;
magenta) color_code="$_COLOR_MAGENTA" ;;
cyan) color_code="$_COLOR_CYAN" ;;
white) color_code="$_COLOR_WHITE" ;;
grey) color_code="$_COLOR_GREY" ;;
esac
# Build format string (SGR escape sequences; attributes stack)
[[ -n "$color_code" ]] && format+="\033[${color_code}m"
[[ "$bold" -eq 1 ]] && format+="\033[1m"
[[ "$underline" -eq 1 ]] && format+="\033[4m"
[[ "$dim" -eq 1 ]] && format+="\033[2m"
fi
# Build indentation: N levels of 2 spaces each
if [[ "$indent" -gt 0 ]]; then
indentation="$(printf "%$((indent * 2))s" "")"
fi
# Handle prefix
if [[ -n "$prefix" ]]; then
if [[ "$_colors_enabled" -eq 1 && -n "$prefix_color" ]]; then
local prefix_code=""
case "$prefix_color" in
black) prefix_code="$_COLOR_BLACK" ;;
red) prefix_code="$_COLOR_RED" ;;
green) prefix_code="$_COLOR_GREEN" ;;
yellow) prefix_code="$_COLOR_YELLOW" ;;
blue) prefix_code="$_COLOR_BLUE" ;;
magenta) prefix_code="$_COLOR_MAGENTA" ;;
cyan) prefix_code="$_COLOR_CYAN" ;;
white) prefix_code="$_COLOR_WHITE" ;;
grey) prefix_code="$_COLOR_GREY" ;;
esac
if [[ -n "$prefix_code" ]]; then
# The prefix's \033[0m reset clears the main format, so re-apply it
# after the prefix for the body text
prefix="\033[${prefix_code}m$prefix\033[0m"
[[ -n "$format" ]] && prefix="$prefix$format"
fi
elif [[ "$_colors_enabled" -eq 1 && -n "$color_code" ]]; then
# No dedicated prefix color: reuse the body text color (no reset needed)
prefix="\033[${color_code}m$prefix"
fi
text="$prefix $text"
fi
# Output formatted text (trailing reset only when a format was applied)
if [[ -n "$format" ]]; then
_echo "${indentation}${format}${text}\033[0m"
else
_echo "${indentation}${text}"
fi
}
# Shorthand helpers: each wraps format_text with a single formatting flag
# and forwards all remaining arguments unchanged.
bold()      { format_text --bold "$@"; }
underline() { format_text --underline "$@"; }
dim()       { format_text --dim "$@"; }
red()       { format_text --color red "$@"; }
green()     { format_text --color green "$@"; }
yellow()    { format_text --color yellow "$@"; }
blue()      { format_text --color blue "$@"; }
grey()      { format_text --color grey "$@"; }
# Styled helpers: semantic wrappers (status/list markers) around format_text.
arrow()   { format_text --color blue --prefix "➜" "$@"; }
bullet()  { format_text --color grey --prefix "•" "$@"; }
numbered() {
  # Prefix with a right-aligned two-digit "NN." counter.
  local n="$1"
  shift
  format_text --color grey --prefix "$(printf "%2d." "$n")" "$@"
}
info()    { format_text --color white "$@"; }
success() { format_text --color green --prefix "✔" "$@"; }
warn()    { format_text --color yellow --prefix "!" "$@"; }
error()   { format_text --color red --prefix "✘" "$@"; }
fatal() {
  # Print a FATAL message and terminate the whole script.
  format_text --color red --prefix "FATAL" "$@"
  exit 1
}
# -----------------------------
# HEADERS AND STRUCTURE
# -----------------------------
# Key/value settings queued here are rendered by the next header call.
SETTING_KEYS=()
SETTING_VALUES=()
_has_pending_settings=false

# Queue one key/value pair for display under the next header.
add_setting() {
  SETTING_KEYS+=("$1")
  SETTING_VALUES+=("$2")
  _has_pending_settings=true
}

# Discard any queued settings without displaying them.
clear_settings() {
  _has_pending_settings=false
  SETTING_KEYS=()
  SETTING_VALUES=()
}
# Internal function to print pending settings
# Renders each queued pair as "  <key>:  <value>" with the key dimmed and
# padded to 20 characters, then clears the queue.
_print_pending_settings() {
echo
for i in "${!SETTING_KEYS[@]}"; do
# NOTE: $(dim "%-20s") builds the printf FORMAT string itself - when colors
# are enabled, the dim escape codes wrap the padded key inside the format.
printf " $(dim "%-20s") %s\n" "${SETTING_KEYS[$i]}:" "${SETTING_VALUES[$i]}"
done
clear_settings
}
# Base header function - internal use only
# Usage: _header [--settings] LEVEL "title"
# LEVEL is 1, 2 or 3. Prints the title in the matching style and flushes any
# pending settings (always when --settings is given, or when settings are
# queued).
# Fix: previously ANY bare argument equal to "1", "2" or "3" overwrote the
# level, so `h1 "2"` rendered as an h2 with an empty title; the level is now
# only taken from the first numeric argument. Also guards the final numeric
# comparison so a missing level no longer raises an arithmetic error.
_header() {
  local level=""
  local title=""
  local show_settings=false
  # Parse arguments
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --settings)
        show_settings=true
        shift
        ;;
      1|2|3)
        # Only the first numeric argument selects the level; later ones
        # are part of the title text.
        if [[ -z "$level" ]]; then
          level="$1"
        else
          title="$1"
        fi
        shift
        ;;
      *)
        # Last non-option argument is the title
        title="$1"
        shift
        ;;
    esac
  done
  echo
  case "$level" in
    1) echo "$(bold "$(underline "$title")")" ;;
    2) echo "$(bold "$title")" ;;
    3) echo "$(dim "• $title")" ;;
  esac
  if [ "$show_settings" = true ] || [ "$_has_pending_settings" = true ]; then
    _print_pending_settings
  fi
  # Add extra newline for h1 and h2 (guard against an empty level)
  if [[ -n "$level" && "$level" -lt 3 ]]; then
    echo
  fi
}
# Header helpers: h1 = main section, h2 = subsection, h3 = minor section.
# Any extra flags (e.g. --settings) are forwarded to _header.
h1() { _header 1 "$@"; }
h2() { _header 2 "$@"; }
h3() { _header 3 "$@"; }
# -----------------------------
# PROMPTS
# -----------------------------
# Ask a yes/no question; returns 0 for "y"/"yes" (any letter case), 1 otherwise.
confirm() {
  local reply
  read -r -p "$(yellow "$1 [y/N]: ")" reply
  [[ "$reply" =~ ^[yY]([eE][sS])?$ ]]
}
# Get user input with optional default value
# Usage: input "What is your name?" "John Doe"
# Echoes the response; when a default is supplied, an empty response
# falls back to it.
input() {
  local prompt="$1"
  local default="$2"
  local answer
  if [ -n "$default" ]; then
    read -r -p "$(yellow "$prompt [$default]: ")" answer
    echo "${answer:-$default}"
    return
  fi
  read -r -p "$(yellow "$prompt: ")" answer
  echo "$answer"
}
# Select from a list of options
# Usage: select_option "Select environment" "dev" "staging" "prod"
# Uses the bash `select` builtin (menu goes to stderr, choice is echoed
# to stdout); loops until a valid option number is entered.
select_option() {
  local prompt="$1"
  shift
  local choice
  echo "$prompt:"
  select choice in "$@"; do
    if [ -n "$choice" ]; then
      echo "$choice"
      break
    fi
  done
}
# -----------------------------
# UTILITIES
# -----------------------------
# Return 0 when the given command is resolvable on PATH (or is a builtin,
# function or alias), 1 otherwise.
cmd_exists() {
  if command -v "$1" > /dev/null 2>&1; then
    return 0
  fi
  return 1
}
# Abort the script with an error unless a required command is available.
# Usage: require_command "terraform"
require_command() {
  local cmd="$1"
  if command -v "$cmd" > /dev/null 2>&1; then
    return 0
  fi
  echo "ERROR: Required command '$cmd' is not installed or not in PATH" >&2
  exit 1
}
# Run a command; if it fails, report via fatal (which exits the script).
# Returns 0 on success.
run_or_die() {
  if "$@"; then
    return 0
  fi
  fatal "Command failed: $*"
}
# Run a command, retrying on failure.
# Usage: retry 3 some_command [args...]
# Globals: RETRY_WAIT - seconds to sleep between attempts (default 5)
# Returns 0 once the command succeeds; calls fatal (exits) after the
# final failed attempt.
# Fix: the original assigned `exit=$?` - a stray, never-read, non-local
# variable that shadowed the name of the `exit` builtin.
retry() {
  local retries="$1"
  shift
  local attempt=0
  local wait="${RETRY_WAIT:-5}"
  until "$@"; do
    attempt=$((attempt + 1))
    if [ "$attempt" -lt "$retries" ]; then
      warn "Command failed. Attempt $attempt/$retries. Retrying in ${wait}s..."
      sleep "$wait"
    else
      fatal "Command failed after $retries attempts"
    fi
  done
  return 0
}
# Abort (via fatal) unless the script is running with effective UID 0.
require_root() {
  if (( EUID != 0 )); then
    fatal "This script must be run as root"
  fi
}
# Resolve a (possibly relative) path to an absolute one.
# The containing directory must exist; the file itself need not.
get_abs_path() {
  local dir base
  dir="$(cd "$(dirname "$1")" && pwd)"
  base="$(basename "$1")"
  echo "$dir/$base"
}
# Succeed (0) when the directory exists and has no entries, hidden ones
# included; also succeeds when the directory cannot be listed at all.
is_dir_empty() {
  local listing
  listing="$(ls -A "$1" 2>/dev/null)"
  [ -z "$listing" ]
}
# Create a directory (including missing parents) when it does not yet exist;
# silently a no-op when it already does.
ensure_dir() {
  [ -d "$1" ] || mkdir -p "$1"
}
# Check if string contains substring
# Usage: if string_contains "hello world" "world"; then echo "yes"; fi
string_contains() {
  case "$1" in
    *"$2"*) return 0 ;;
  esac
  return 1
}
# Spinner (for background jobs)
# Usage:
#   long_running_command & spinner
#   spinner "$pid"          # or pass an explicit PID to watch
# Fixes/generalization: accepts an optional PID argument (defaulting to $!
# for the original calling convention), and no longer redeclares `local`
# variables on every loop iteration.
spinner() {
  local pid="${1:-$!}"
  local delay=0.1
  local spinstr='|/-\'
  local rest
  while ps -p "$pid" > /dev/null 2>&1; do
    rest=${spinstr#?}
    printf " [%c] " "$spinstr"
    spinstr=$rest${spinstr%"$rest"}
    sleep "$delay"
    printf "\b\b\b\b\b\b"
  done
  printf " \b\b\b\b"
}
# -----------------------------
# VALIDATION
# -----------------------------
# Validate a dotted-quad IPv4 address (four octets, each 0-255).
# Fixes: octets with leading zeros such as "08"/"09" previously triggered a
# bash arithmetic error ("value too great for base") inside [[ ... -le 255 ]]
# because they parse as invalid octal literals; forcing base 10 with 10#
# handles them correctly. Also no longer leaks a global OIFS variable.
is_valid_ip() {
  local ip="$1"
  local -a parts
  local octet
  if [[ ! "$ip" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
    return 1
  fi
  # IFS applies only to this read invocation; no global IFS mutation.
  IFS='.' read -r -a parts <<< "$ip"
  for octet in "${parts[@]}"; do
    if (( 10#$octet > 255 )); then
      return 1
    fi
  done
  return 0
}
# Validate a URL: requires an http(s)/ftp/file scheme followed by "://" and
# a plausible character sequence. Matching is unanchored, mirroring the
# original behavior.
is_valid_url() {
  local candidate="$1"
  local pattern='(https?|ftp|file)://[-[:alnum:]\+&@#/%?=~_|!:,.;]*[-[:alnum:]\+&@#/%=~_|]'
  if [[ "$candidate" =~ $pattern ]]; then
    return 0
  fi
  return 1
}
# -----------------------------
# LOGGING
# -----------------------------
# Default log destination; override by reassigning $logfile before calling log.
logfile="/tmp/script.log"

# Append a timestamped message line to $logfile.
log() {
  printf '%s %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$*" >> "$logfile"
}
# Rotate $logfile to a timestamped backup once it exceeds the given size
# (in MB, as reported by `du -m`), then start a fresh empty log.
rotate_log() {
  local max_size="$1"
  local size_mb
  [ -f "$logfile" ] || return 0
  size_mb=$(du -m "$logfile" | cut -f1)
  if [ "$size_mb" -gt "$max_size" ]; then
    mv "$logfile" "${logfile}.$(date +%Y%m%d_%H%M%S)"
    touch "$logfile"
  fi
}
# -----------------------------
# DEBUG
# -----------------------------
# Print a debug message when DEBUG=1; silent otherwise.
# Fix: the original `[ ... ] && echo` form made the function return 1
# whenever DEBUG was off, which aborts any caller running under `set -e`.
# Always returns 0 now. Also tolerates an unset DEBUG variable.
debug() {
  if [ "${DEBUG:-}" == "1" ]; then
    echo -e "$(blue "[DEBUG]") $1"
  fi
  return 0
}
# Print the current bash call stack, one "<line> <func> <file>" row per
# frame, using the `caller` builtin until it runs out of frames.
print_stack_trace() {
  local depth=0
  while caller "$depth"; do
    ((depth++))
  done
}
# When DEBUG=1, enable bash command tracing (set -x) plus a DEBUG trap that
# logs every command with its line number; no-op otherwise.
enable_debug_mode() {
  [ "${DEBUG:-}" == "1" ] || return 0
  set -x
  trap 'debug "Line $LINENO: $BASH_COMMAND"' DEBUG
}
# -----------------------------
# CLEANUP
# -----------------------------
# Stack of shell command strings executed on script exit, newest first.
declare -a cleanup_functions

# Register a command string to run at script exit.
# Usage: add_cleanup "rm -f /tmp/tempfile.txt"
# The string is eval'd later, so escape variables you want expanded at
# cleanup time rather than registration time, e.g.:
#   add_cleanup "rm -f \$tempfile"
#   add_cleanup "kill \$server_pid"
add_cleanup() {
  cleanup_functions+=("$1")
}
# Run all registered cleanup functions in reverse order
# (LIFO: last added, first executed - like a defer stack).
# This function is automatically called by the EXIT trap
# and should not be called manually
# Usage: Automatic via trap
cleanup() {
# Execute cleanup functions in reverse order (last added, first executed)
for ((i=${#cleanup_functions[@]}-1; i>=0; i--)); do
# Each entry is an arbitrary command string; eval runs it in this shell,
# so only register trusted strings (see add_cleanup).
eval "${cleanup_functions[i]}"
done
}
# Set trap for cleanup to ensure resources are properly released
# when the script exits for any reason
trap cleanup EXIT
#!/usr/bin/env bash
# =============================================================================
# Comprehensive Test Suite for _utils.sh Functions
# =============================================================================
# Get the directory where this test script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Path to the parent directory (scripts directory)
PARENT_DIR="$(dirname "$SCRIPT_DIR")"
# Source the utilities file from the parent directory
source "$PARENT_DIR/_utils.sh"
# =============================================================================
# Test Framework
# =============================================================================
# Initialize test counters (mutated by the assert_* helpers below)
TEST_COUNT=0
PASS_COUNT=0
FAIL_COUNT=0
# assert_equals: assert that two values are string-equal.
#
# Parameters:
#   $1: Expected value
#   $2: Actual value
#   $3: Optional test description
# Increments TEST_COUNT and PASS_COUNT/FAIL_COUNT; returns 0 on pass, 1 on fail.
assert_equals() {
  local expected="$1"
  local actual="$2"
  local message="${3:-}"
  TEST_COUNT=$((TEST_COUNT + 1))
  if [[ "$expected" != "$actual" ]]; then
    echo -e "$(red "❌ FAIL"): ${message:-"Expected '$expected', got '$actual'"}"
    FAIL_COUNT=$((FAIL_COUNT + 1))
    return 1
  fi
  echo -e "$(green "✅ PASS"): ${message:-"Expected '$expected', got '$actual'"}"
  PASS_COUNT=$((PASS_COUNT + 1))
  return 0
}
# assert_contains: assert that a string contains a substring.
#
# Parameters:
#   $1: String to search in
#   $2: Substring to find
#   $3: Optional test description
# Increments TEST_COUNT and PASS_COUNT/FAIL_COUNT; returns 0 on pass, 1 on fail.
assert_contains() {
  local string="$1"
  local substring="$2"
  local message="${3:-}"
  TEST_COUNT=$((TEST_COUNT + 1))
  case "$string" in
    *"$substring"*)
      echo -e "$(green "✅ PASS"): ${message:-"String contains '$substring'"}"
      PASS_COUNT=$((PASS_COUNT + 1))
      return 0
      ;;
  esac
  echo -e "$(red "❌ FAIL"): ${message:-"String does not contain '$substring'"}"
  FAIL_COUNT=$((FAIL_COUNT + 1))
  return 1
}
# assert_true: assert that a command string evaluates with exit code 0.
#
# Parameters:
#   $1: Command to evaluate (eval'd - pass trusted strings only)
#   $2: Optional test description
# Increments TEST_COUNT and PASS_COUNT/FAIL_COUNT; returns 0 on pass, 1 on fail.
assert_true() {
  local command="$1"
  local message="$2"
  TEST_COUNT=$((TEST_COUNT + 1))
  if ! eval "$command"; then
    echo -e "$(red "❌ FAIL"): ${message:-"Command failed: $command"}"
    FAIL_COUNT=$((FAIL_COUNT + 1))
    return 1
  fi
  echo -e "$(green "✅ PASS"): ${message:-"Command succeeded: $command"}"
  PASS_COUNT=$((PASS_COUNT + 1))
  return 0
}
# assert_false: assert that a command string evaluates with a non-zero exit code.
#
# Parameters:
#   $1: Command to evaluate (eval'd - pass trusted strings only)
#   $2: Optional test description
# Increments TEST_COUNT and PASS_COUNT/FAIL_COUNT; returns 0 on pass, 1 on fail.
assert_false() {
  local command="$1"
  local message="$2"
  TEST_COUNT=$((TEST_COUNT + 1))
  if eval "$command"; then
    echo -e "$(red "❌ FAIL"): ${message:-"Command succeeded unexpectedly: $command"}"
    FAIL_COUNT=$((FAIL_COUNT + 1))
    return 1
  fi
  echo -e "$(green "✅ PASS"): ${message:-"Command failed as expected: $command"}"
  PASS_COUNT=$((PASS_COUNT + 1))
  return 0
}
# =============================================================================
# Create Test Directories and Files
# =============================================================================
# Create temp directory for test files
TEST_TMP_DIR="$(mktemp -d)"
echo "Creating temporary test directory: $TEST_TMP_DIR"
# Add cleanup function to remove temp directory on exit
# (the path is expanded NOW - escaped quotes guard against spaces in it)
add_cleanup "rm -rf \"$TEST_TMP_DIR\""
# =============================================================================
# Text Formatting Tests
# =============================================================================
h1 "Text Formatting Tests"
# Capture formatted output (stderr discarded; stdout holds the styled text)
test_bold=$(bold "Bold Text" 2>/dev/null)
test_red=$(red "Red Text" 2>/dev/null)
test_green=$(green "Green Text" 2>/dev/null)
test_blue=$(blue "Blue Text" 2>/dev/null)
test_yellow=$(yellow "Yellow Text" 2>/dev/null)
test_grey=$(grey "Grey Text" 2>/dev/null)
test_underline=$(underline "Underlined Text" 2>/dev/null)
test_dim=$(dim "Dimmed Text" 2>/dev/null)
# Test if output contains the text (can't test exact formatting since it depends on terminal)
assert_contains "$test_bold" "Bold Text" "bold() function contains the correct text"
assert_contains "$test_red" "Red Text" "red() function contains the correct text"
assert_contains "$test_green" "Green Text" "green() function contains the correct text"
assert_contains "$test_blue" "Blue Text" "blue() function contains the correct text"
assert_contains "$test_yellow" "Yellow Text" "yellow() function contains the correct text"
assert_contains "$test_grey" "Grey Text" "grey() function contains the correct text"
assert_contains "$test_underline" "Underlined Text" "underline() function contains the correct text"
assert_contains "$test_dim" "Dimmed Text" "dim() function contains the correct text"
# Test formatted text with prefixes
test_arrow=$(arrow "Arrow Text" 2>/dev/null)
test_bullet=$(bullet "Bullet Text" 2>/dev/null)
test_success=$(success "Success Text" 2>/dev/null)
test_warn=$(warn "Warning Text" 2>/dev/null)
test_error=$(error "Error Text" 2>/dev/null)
assert_contains "$test_arrow" "Arrow Text" "arrow() function contains the correct text"
assert_contains "$test_bullet" "Bullet Text" "bullet() function contains the correct text"
assert_contains "$test_success" "Success Text" "success() function contains the correct text"
assert_contains "$test_warn" "Warning Text" "warn() function contains the correct text"
assert_contains "$test_error" "Error Text" "error() function contains the correct text"
# =============================================================================
# Headers and Settings Tests
# =============================================================================
h2 "Headers and Settings Tests"
# Test header functions (2>&1 captures everything the header prints)
test_h1=$(h1 "Test Header 1" 2>&1)
test_h2=$(h2 "Test Header 2" 2>&1)
test_h3=$(h3 "Test Header 3" 2>&1)
assert_contains "$test_h1" "Test Header 1" "h1() function contains the correct text"
assert_contains "$test_h2" "Test Header 2" "h2() function contains the correct text"
assert_contains "$test_h3" "Test Header 3" "h3() function contains the correct text"
# Test settings: queued pairs must be rendered by the next header call
add_setting "Key 1" "Value 1"
add_setting "Key 2" "Value 2"
test_settings=$(h2 --settings "Settings Test" 2>&1)
assert_contains "$test_settings" "Settings Test" "h2() with settings contains the header text"
assert_contains "$test_settings" "Key 1" "h2() with settings contains the setting key"
assert_contains "$test_settings" "Value 1" "h2() with settings contains the setting value"
assert_contains "$test_settings" "Key 2" "h2() with settings contains the second setting key"
assert_contains "$test_settings" "Value 2" "h2() with settings contains the second setting value"
# =============================================================================
# Prompt Tests
# =============================================================================
h2 "Prompt Tests"
# These are limited in automated testing (they read from a TTY), so we only
# check that the functions are defined
assert_true "type -t confirm" "confirm() function exists"
assert_true "type -t input" "input() function exists"
assert_true "type -t select_option" "select_option() function exists"
# =============================================================================
# Utility Function Tests
# =============================================================================
h2 "Utility Function Tests"
# Test cmd_exists
assert_true "cmd_exists 'bash'" "cmd_exists() correctly identifies an existing command"
assert_false "cmd_exists 'this_command_does_not_exist'" "cmd_exists() correctly identifies a non-existent command"
# Test require_command (will only check existence - not actual execution)
assert_true "type -t require_command" "require_command() function exists"
# Test get_abs_path
# NOTE(review): test_rel_path is assigned but never used below - confirm
# whether a relative-path assertion was intended here.
test_rel_path="./relative/path"
# NOTE(review): _utils.sh actually lives in PARENT_DIR (it is sourced from
# there above), not SCRIPT_DIR; this still works because get_abs_path does
# not require the file to exist - but confirm the intended path.
test_abs_path="$(get_abs_path "$SCRIPT_DIR/_utils.sh")"
assert_true "[[ \"$test_abs_path\" == /* ]]" "get_abs_path() returns an absolute path"
assert_contains "$test_abs_path" "_utils.sh" "get_abs_path() includes the filename"
# Test is_dir_empty
mkdir -p "$TEST_TMP_DIR/empty_dir"
touch "$TEST_TMP_DIR/non_empty_dir_file"
mkdir -p "$TEST_TMP_DIR/non_empty_dir"
mv "$TEST_TMP_DIR/non_empty_dir_file" "$TEST_TMP_DIR/non_empty_dir/"
assert_true "is_dir_empty \"$TEST_TMP_DIR/empty_dir\"" "is_dir_empty() correctly identifies an empty directory"
assert_false "is_dir_empty \"$TEST_TMP_DIR/non_empty_dir\"" "is_dir_empty() correctly identifies a non-empty directory"
# Test ensure_dir (nested path exercises the mkdir -p behavior)
test_new_dir="$TEST_TMP_DIR/new_dir/nested"
ensure_dir "$test_new_dir"
assert_true "[ -d \"$test_new_dir\" ]" "ensure_dir() creates directories correctly"
# Test string_contains
assert_true "string_contains \"hello world\" \"world\"" "string_contains() correctly identifies substring"
assert_false "string_contains \"hello world\" \"missing\"" "string_contains() correctly rejects missing substring"
# =============================================================================
# Validation Tests
# =============================================================================
h2 "Validation Tests"
# Test is_valid_ip
assert_true "is_valid_ip \"192.168.1.1\"" "is_valid_ip() validates correct IPv4 address"
assert_true "is_valid_ip \"10.0.0.1\"" "is_valid_ip() validates correct IPv4 address"
assert_true "is_valid_ip \"255.255.255.255\"" "is_valid_ip() validates correct IPv4 address"
assert_false "is_valid_ip \"192.168.1.256\"" "is_valid_ip() rejects invalid IPv4 address"
assert_false "is_valid_ip \"not.an.ip.address\"" "is_valid_ip() rejects non-numeric IPv4 address"
assert_false "is_valid_ip \"192.168.1\"" "is_valid_ip() rejects incomplete IPv4 address"
# Test is_valid_url
assert_true "is_valid_url \"https://example.com\"" "is_valid_url() validates correct URL"
assert_true "is_valid_url \"http://example.com/path?query=value\"" "is_valid_url() validates URL with path and query"
assert_true "is_valid_url \"ftp://example.com\"" "is_valid_url() validates FTP URL"
assert_false "is_valid_url \"example.com\"" "is_valid_url() rejects URL without protocol"
assert_false "is_valid_url \"not a url\"" "is_valid_url() rejects invalid URL"
# =============================================================================
# Cleanup Tests
# =============================================================================
h2 "Cleanup Tests"
# Test cleanup functions
cleanup_test_file="$TEST_TMP_DIR/cleanup_test_file"
touch "$cleanup_test_file"
assert_true "[ -f \"$cleanup_test_file\" ]" "Test file exists before cleanup test"
# Add cleanup command
add_cleanup "rm -f \"$cleanup_test_file\""
# Run cleanup manually for testing
# NOTE(review): this also runs the earlier-registered "rm -rf TEST_TMP_DIR"
# entry and empties the cleanup stack, so cleanup happens here rather than
# at the EXIT trap - intentional for this test, but worth confirming.
cleanup
assert_false "[ -f \"$cleanup_test_file\" ]" "add_cleanup() and cleanup() properly removes test file"
# =============================================================================
# Test Summary
# =============================================================================
h1 "Test Summary"
echo "Tests run: $TEST_COUNT"
echo "Tests passed: $(green "$PASS_COUNT")"
echo "Tests failed: $(red "$FAIL_COUNT")"
# Exit code communicates the result to the test runner
if [ "$FAIL_COUNT" -gt 0 ]; then
error "Some tests failed"
exit 1
else
success "All tests passed"
exit 0
fi
#!/usr/bin/env bash
# =============================================================================
# Test Runner for Komo Infrastructure Bash Scripts
# =============================================================================
# Get the directory where this test script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Parent directory (scripts directory)
PARENT_DIR="$(dirname "$SCRIPT_DIR")"
# Source the utility functions from the parent directory
source "$PARENT_DIR/_utils.sh"
# Initialize counters
TESTS_TOTAL=0
TESTS_PASSED=0
TESTS_FAILED=0
# Queue settings BEFORE the header: _header flushes pending settings when it
# runs, so settings added after h1 would only surface under a later header.
add_setting "Test Date" "$(date '+%Y-%m-%d %H:%M:%S')"
add_setting "Tests Directory" "$SCRIPT_DIR"
# Display header (also renders the queued settings)
h1 "Komo Infrastructure Test Runner"
# Run one test file in a child bash process and track pass/fail counters.
# Params: $1 - path to the test file
# Returns 0 when the test file exited 0, 1 otherwise.
run_test_file() {
  local test_file="$1"
  local test_name
  test_name="$(basename "$test_file")"
  h2 "Running Test: $test_name"
  # Execute in a separate process so a failing test cannot kill the runner.
  bash "$test_file"
  local exit_code=$?
  TESTS_TOTAL=$((TESTS_TOTAL + 1))
  if [ "$exit_code" -ne 0 ]; then
    TESTS_FAILED=$((TESTS_FAILED + 1))
    error "❌ Test file failed: $test_name"
    return 1
  fi
  TESTS_PASSED=$((TESTS_PASSED + 1))
  success "✅ Test file passed: $test_name"
  return 0
}
# Run all *.test.sh files in this directory, excluding the runner itself.
# Fix: the original stored the list in a string and word-split it with
# `for test_file in $test_files`, which breaks on file names containing
# spaces; reading line-by-line from find handles them correctly.
found_any=0
while IFS= read -r test_file; do
  [ -n "$test_file" ] || continue
  found_any=1
  run_test_file "$test_file"
done < <(find "$SCRIPT_DIR" -name "*.test.sh" -type f -not -name "$(basename "$0")" | sort)
if [ "$found_any" -eq 0 ]; then
  warn "No test files found in $SCRIPT_DIR"
fi
# Display summary.
# Fix: settings must be queued BEFORE the header that shows them - the
# original called add_setting after the final h1, so the summary counters
# were never displayed (no further header ever flushed them).
add_setting "Total Test Files" "$TESTS_TOTAL"
add_setting "Passed" "$TESTS_PASSED"
add_setting "Failed" "$TESTS_FAILED"
h1 "Test Results Summary"
# Exit with error if any tests failed
if [ "$TESTS_FAILED" -gt 0 ]; then
  error "❌ $TESTS_FAILED test files failed"
  exit 1
else
  success "✅ All test files passed"
  exit 0
fi
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment