Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1 @@
tests/**/results.json
tests/**/testbox/
10 changes: 6 additions & 4 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
FROM ortussolutions/commandbox:jdk11-3.9.3
FROM ortussolutions/boxlang:cli-alpine-1.11.0

WORKDIR /opt/test-runner

# Pre-install box dependencies for offline usage
COPY box.json .
RUN box install
# Pre-install testbox and its dependencies for offline usage
RUN bash /usr/local/boxlang/scripts/install-bx-module.sh testbox \
&& bash /usr/local/boxlang/scripts/install-bx-module.sh globber \
&& mkdir -p ~/.boxlang/modules/testbox/system/modules \
&& cp -R ~/.boxlang/modules/globber ~/.boxlang/modules/testbox/system/modules/

COPY . .

Expand Down
38 changes: 16 additions & 22 deletions bin/run-tests.sh
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
#! /bin/sh
#!/usr/bin/env bash
set -euo pipefail

# Synopsis:
# Test the test runner by running it against a predefined set of solutions
# Test the test runner by running it against a predefined set of solutions
# with an expected output.

# Output:
Expand All @@ -15,26 +16,19 @@ exit_code=0

# Iterate over all test directories
for test_dir in tests/*; do
test_dir_name=$(basename "${test_dir}")
test_dir_path=$(realpath "${test_dir}")
results_file_path="${test_dir_path}/results.json"
expected_results_file_path="${test_dir_path}/expected_results.json"

bin/run.sh "${test_dir_name}" "${test_dir_path}" "${test_dir_path}"

# Normalize the results file
sed -i -E \
-e 's/\s+\([0-9]+ ms\)//g' \
-e 's/Duration.*[0-9]+ms//g' \
-e "s~${test_dir_path}~/solution~g" \
"${results_file_path}"

echo "${test_dir_name}: comparing results.json to expected_results.json"
diff "${results_file_path}" "${expected_results_file_path}"

if [ $? -ne 0 ]; then
exit_code=1
fi
test_dir_name="${test_dir##*/}"
test_dir_path="$(realpath "${test_dir}")"
results_file_path="${test_dir_path}/results.json"
expected_results_file_path="${test_dir_path}/expected_results.json"

bin/run.sh "${test_dir_name}" "${test_dir_path}" "${test_dir_path}"

echo "${test_dir_name}: comparing results.json to expected_results.json"
diff "${results_file_path}" "${expected_results_file_path}"

if [ $? -ne 0 ]; then
exit_code=1
fi
done

exit ${exit_code}
57 changes: 24 additions & 33 deletions bin/run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,60 +5,51 @@

# Arguments:
# $1: exercise slug
# $2: absolute path to solution folder
# $3: absolute path to output directory
# $2: path to solution folder
# $3: path to output directory

# Output:
# Writes the test results to a results.json file in the passed-in output directory.
# The test results are formatted according to the specifications at https://github.com/exercism/docs/blob/main/building/tooling/test-runners/interface.md

# Example:
# ./bin/run.sh two-fer /absolute/path/to/two-fer/solution/folder/ /absolute/path/to/output/directory/
# ./bin/run.sh two-fer path/to/solution/folder/ path/to/output/directory/

# If any required arguments is missing, print the usage and exit
if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
echo "usage: ./bin/run.sh exercise-slug /absolute/path/to/two-fer/solution/folder/ /absolute/path/to/output/directory/"
exit 1
set -euo pipefail

# If any required argument is missing, print the usage and exit.
# Use ${N:-} expansions: with 'set -u' active, a bare "$1" on a missing
# argument aborts with "unbound variable" before the usage line can print.
if [[ -z "${1:-}" || -z "${2:-}" || -z "${3:-}" ]]; then
    echo "usage: ./bin/run.sh exercise-slug path/to/solution/folder/ path/to/output/directory/"
    exit 1
fi

slug="$1"
input_dir="${2%/}"
output_dir="${3%/}"
exercise="${slug//-/_}"
implementation_file="${input_dir}/${exercise}.pl"
tests_file="${input_dir}/${exercise}_tests.plt"
results_file="${output_dir}/results.json"

# Create the output directory if it doesn't exist
mkdir -p "${output_dir}"
if ! mkdir -p "${output_dir}"; then
jq -n '{"version": 2, "status": "error", "message": "The test runner failed to create the output directory for your test run. Please open a thread on the Exercism forums."}' > "${results_file}"
exit 1
fi

echo "${slug}: testing..."

pushd "${input_dir}" > /dev/null
script_dir="${0%/*}"

# Run the tests for the provided implementation file and redirect stdout and
# stderr to capture it
test_output=$(box task run TestRunner 2>&1)
exit_code=$?
# Avoid JVM DNS lookup issue on Alpine
echo "127.0.0.1 $HOSTNAME" > /tmp/hosts
export JAVA_TOOL_OPTIONS="-Djdk.net.hosts.file=/tmp/hosts"

popd > /dev/null
# Need a writable home directory
export BOXLANG_HOME=/tmp/.boxlang

# Write the results.json file based on the exit code of the command that was
# just executed that tested the implementation file
if [ $exit_code -eq 0 ]; then
jq -n '{version: 1, status: "pass"}' > ${results_file}
else
# Sanitize the test output
sanitized_test_output=$(echo "${test_output}" \
| sed -E 's/.*(Installing package|Installing.+dependencies).*//g' \
| sed -e '/./,$!d' -e :a -e '/^\n*$/{$d;N;ba' -e '}')
boxlang "${script_dir}/test-runner.bxs" "${input_dir}" "${output_dir}" > /dev/null 2>&1 || true

# Manually add colors to the output to help scanning the output for errors
colorized_test_output=$(echo "${sanitized_test_output}" \
| GREP_COLOR='01;31' grep --color=always -E -e 'X.*\([0-9]+ ms\)|$' -e '-> Failure.*|$' \
| GREP_COLOR='01;32' grep --color=always -E -e '√.*\([0-9]+ ms\)|$')

jq -n --arg output "${colorized_test_output}" '{version: 1, status: "fail", message: $output}' > ${results_file}
if [[ ! -f "${results_file}" ]]; then
jq -n '{"version": 2, "status": "error", "message": "The test runner failed to run your tests. Please open a thread on the Exercism forums."}' > "${results_file}"
fi

jq . "${results_file}" > "${results_file}.tmp" && mv "${results_file}.tmp" "${results_file}"

echo "${slug}: done"
112 changes: 112 additions & 0 deletions bin/test-runner.bxs
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
// Entry point for the BoxLang test runner script.
// Invoked as: boxlang test-runner.bxs <solutionPath> <outputPath>
// Writes <outputPath>/results.json in the Exercism test-runner interface
// format (version 2) — see
// https://github.com/exercism/docs/blob/main/building/tooling/test-runners/interface.md
solutionPath = server.cli.parsed.positionals[ 1 ];
resultsPath = server.cli.parsed.positionals[ 2 ];

try {
    // Register mappings so TestBox can resolve dot-path bundles:
    // "solution.*" points at the exercise folder, "testbox" at the module
    // pre-installed under the user's home (see Dockerfile).
    userHome = server.system.properties[ "user.home" ];
    mappings = getApplicationMetadata().mappings;
    mappings[ "/solution" ] = solutionPath;
    mappings[ "/testbox" ] = userHome & "/.boxlang/modules/testbox";

    // Every *Test.cfc in the solution folder becomes a TestBox bundle.
    testBox = new testbox.system.TestBox();
    testFiles = directoryList( solutionPath, false, "name", "*Test.cfc" );
    for ( file in testFiles ) {
        testBox.addBundles( "solution." & file.listFirst( "." ) );
    }

    results = testBox.runRaw().getMemento();
    bundleStats = results.bundleStats ?: [];

    status = "pass";
    message = "";
    tests = [];

    // A globalException on any bundle means the bundle (and typically the
    // solution it loads) failed to compile; report a single top-level
    // "error", with the absolute solution path scrubbed to "/solution".
    for ( bundle in bundleStats ) {
        if ( structKeyExists( bundle, "globalException" ) && !isSimpleValue( bundle.globalException ) ) {
            status = "error";
            message = ( bundle.globalException.message ?: "Bundle compilation error" ).replace( solutionPath, "/solution", "ALL" );
            break;
        }
    }

    if ( status != "error" ) {
        if ( ( results.totalBundles ?: 0 ) == 0 ) {
            status = "error";
            message = "No tests found";
        } else {
            // Read test file contents so each reported spec can include its
            // source ("test_code"); collectSpecs flattens the suite tree.
            // NOTE(review): assumes the bundle's last dot-path segment names
            // the .cfc file in the solution folder — holds for the bundles
            // registered above.
            for ( bundle in bundleStats ) {
                testFileName = bundle.name.listLast( "." ) & ".cfc";
                filePath = solutionPath & "/" & testFileName;
                fileContent = fileExists( filePath ) ? fileRead( filePath ) : "";
                collectSpecs( bundle.suiteStats ?: [], tests, fileContent );
            }
            if ( ( results.totalFail ?: 0 ) > 0 || ( results.totalError ?: 0 ) > 0 ) {
                status = "fail";
            }
        }
    }

    // Optional keys ("message", "tests") are emitted only when non-empty,
    // matching the expected_results.json fixtures.
    output = { "version": 2, "status": status };
    if ( message.len() ) {
        output[ "message" ] = message;
    }
    if ( tests.len() ) {
        output[ "tests" ] = tests;
    }

    fileWrite( resultsPath & "/results.json", jsonSerialize( output ) );

} catch ( any e ) {
    // Any unexpected failure still produces a well-formed results.json,
    // again with absolute solution paths scrubbed from the message.
    errMessage = e.message.replace( solutionPath, "/solution", "ALL" );
    fileWrite( resultsPath & "/results.json",
        jsonSerialize( { "version": 2, "status": "error", "message": errMessage } ) );
}

// Recursively collect specs
// Walk a TestBox suiteStats tree and flatten every spec into `tests`,
// mapping TestBox statuses onto the Exercism interface values
// (passed -> pass, failed -> fail, anything else -> error).
function collectSpecs( required array suites, required array tests, required string fileContent ) {
    for ( var suiteInfo in suites ) {
        var specList = suiteInfo.specStats ?: [];
        for ( var specInfo in specList ) {
            var rawStatus = ( specInfo.status ?: "failed" ).lCase();
            var mappedStatus = "error";
            if ( rawStatus == "passed" ) {
                mappedStatus = "pass";
            } else if ( rawStatus == "failed" ) {
                mappedStatus = "fail";
            }
            var specName = specInfo.name ?: "Unknown";
            var entry = {
                "name": specName,
                "status": mappedStatus,
                "test_code": extractTestCode( fileContent, specInfo.name ?: "" )
            };
            // Attach the failure message only for non-passing specs.
            if ( mappedStatus != "pass" && ( specInfo.failMessage ?: "" ).len() ) {
                entry[ "message" ] = specInfo.failMessage;
            }
            tests.append( entry );
        }
        // Recurse into nested describe() suites.
        if ( structKeyExists( suiteInfo, "suiteStats" ) && suiteInfo.suiteStats.len() ) {
            collectSpecs( suiteInfo.suiteStats, tests, fileContent );
        }
    }
}

// Extract the body of an it() block
// Extract the body of an it() block for the given spec name.
//
// fileContent: raw source of the test bundle file ("" when unreadable).
// testName:    the description passed to it(). Spec names may contain regex
//              metacharacters ("handles (edge) case?"), so the name is
//              escaped before being interpolated into the search pattern —
//              the previous raw interpolation silently failed to match (or
//              produced an invalid pattern) for such names.
// Returns the trimmed lines between the it() opening brace and its matching
// closing brace, or "" when the spec cannot be located.
function extractTestCode( required string fileContent, required string testName ) {
    if ( !fileContent.len() || !testName.len() ) return "";

    // Escape regex metacharacters in the spec name (backreference \1 keeps
    // the matched character, prefixed with a literal backslash).
    var escapedName = reReplace( testName, "([+*?.()\[\]\{\}|\\^$])", "\\\1", "all" );

    var lines = fileContent.listToArray( char( 10 ) );
    var depth = 1;
    var found = false;
    var body = [];

    for ( var line in lines ) {
        if ( !found ) {
            // NOTE: requires the opening "{" on the same line as it( 'name',
            // which matches the conventional TestBox spec layout.
            if ( line.reFind( "it\(\s*['""]" & escapedName & "['""]" ) > 0 && line.find( "{" ) > 0 ) {
                found = true;
            }
            continue;
        }

        // Track brace depth to find the matching close. Braces inside
        // strings or comments are counted too, which can truncate
        // oddly-formatted bodies — acceptable for display purposes.
        for ( var ch in line.listToArray( "" ) ) {
            if ( ch == "{" ) depth++;
            if ( ch == "}" ) depth--;
        }
        if ( depth <= 0 ) break;
        body.append( line );
    }

    return trim( body.toList( char( 10 ) ) );
}
8 changes: 0 additions & 8 deletions box.json

This file was deleted.

19 changes: 19 additions & 0 deletions tests/error-if-empty-file/ErrorIfEmptyFileTest.cfc
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
// Fixture bundle for the "error-if-empty-file" runner test.
// The companion solution file is expected to fail compilation (empty file —
// see expected_results.json's '<EOF>' compile error), so these specs never
// actually run; the runner must report a top-level "error" instead.
component extends="testbox.system.BaseSpec" {

    function beforeAll(){
        // System under test: the intentionally broken solution component.
        SUT = createObject( 'ErrorIfEmptyFile' );
    }

    function run(){

        describe( 'ErrorIfEmptyFile', function(){

            it( 'This should error', function(){
                expect( SUT.alwaysTrue() ).toBeTrue();
            });

        });

    }

}
5 changes: 5 additions & 0 deletions tests/error-if-empty-file/expected_results.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"message": "Error compiling [ /solution/ErrorIfEmptyFile.cfc ]. /solution/ErrorIfEmptyFile.cfc: Line: 1 Col: 0 - '<EOF>' was unexpected \nexpecting one of: 'abstract', 'final', Operator, Statement\n\n",
"version": 2,
"status": "error"
}
1 change: 1 addition & 0 deletions tests/error-if-syntax-error/ErrorIfSyntaxError.cfc
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
componentTTT@@ clazzzz
19 changes: 19 additions & 0 deletions tests/error-if-syntax-error/ErrorIfSyntaxErrorTest.cfc
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
// Fixture bundle for the "error-if-syntax-error" runner test.
// The companion solution file (ErrorIfSyntaxError.cfc) contains deliberate
// garbage, so compilation fails before any spec runs (see
// expected_results.json); the runner must report a top-level "error".
component extends="testbox.system.BaseSpec" {

    function beforeAll(){
        // System under test: the intentionally unparsable solution component.
        SUT = createObject( 'ErrorIfSyntaxError' );
    }

    function run(){

        describe( 'ErrorIfSyntaxError', function(){

            it( 'This will error out', function(){
                expect( SUT.alwaysTrue() ).toBeTrue();
            });

        });

    }

}
5 changes: 5 additions & 0 deletions tests/error-if-syntax-error/expected_results.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"message": "Error compiling [ /solution/ErrorIfSyntaxError.cfc ]. /solution/ErrorIfSyntaxError.cfc: Line: 1 Col: 0 - 'componentTTT' was unexpected \nexpecting one of: 'abstract', 'final', Operator, Statement\ncomponentTTT@@ clazzzz\n^^^^^^^^^^^^",
"version": 2,
"status": "error"
}
14 changes: 0 additions & 14 deletions tests/example-all-fail/Leap.cfc

This file was deleted.

Loading