diff --git a/.dockerignore b/.dockerignore index 3eb8fac..a46f29f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -7,4 +7,6 @@ Dockerfile bin/run-in-docker.sh bin/run-tests-in-docker.sh bin/run-tests.sh +bin/validate-track-in-docker.sh tests/ +track/ diff --git a/.gitignore b/.gitignore index c6f4076..90b351a 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ /tests/**/*/results.json +track/ diff --git a/Dockerfile b/Dockerfile index bd3578c..7213825 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,51 @@ -FROM alpine:3.18 +FROM ubuntu:24.04 AS builder -# install packages required to run the tests -RUN apk add --no-cache jq coreutils +ENV LUA_VER="5.4.8" +ENV LUA_CHECKSUM="4f18ddae154e793e46eeab727c59ef1c0c0c2b744e7b94219710d76f530629ae" +ENV LUAROCKS_VER="3.12.0" +ENV LUAROCKS_GPG_KEY="3FD8F43C2BB3C478" +RUN apt-get update && \
+    apt-get install -y curl gcc make unzip gnupg git && \
+    rm -rf /var/lib/apt/lists/* && \
+    apt-get purge --auto-remove && \
+    apt-get clean
+
+RUN curl -R -O -L https://www.lua.org/ftp/lua-${LUA_VER}.tar.gz && \
+    [ "$(sha256sum lua-${LUA_VER}.tar.gz | cut -d' ' -f1)" = "${LUA_CHECKSUM}" ] && \
+    tar -zxf lua-${LUA_VER}.tar.gz && \
+    cd lua-${LUA_VER} && \
+    make all install && \
+    cd .. && \
+    rm lua-${LUA_VER}.tar.gz && \
+    rm -rf lua-${LUA_VER}
+
+RUN curl -R -O -L https://luarocks.org/releases/luarocks-${LUAROCKS_VER}.tar.gz && \
+    curl -R -O -L https://luarocks.org/releases/luarocks-${LUAROCKS_VER}.tar.gz.asc && \
+    gpg --keyserver keyserver.ubuntu.com --recv-keys ${LUAROCKS_GPG_KEY} && \
+    gpg --verify luarocks-${LUAROCKS_VER}.tar.gz.asc luarocks-${LUAROCKS_VER}.tar.gz && \
+    tar -zxpf luarocks-${LUAROCKS_VER}.tar.gz && \
+    cd luarocks-${LUAROCKS_VER} && \
+    ./configure && make && make install && \
+    cd .. 
&& \ + rm luarocks-${LUAROCKS_VER}.tar.gz.asc && \ + rm luarocks-${LUAROCKS_VER}.tar.gz && \ + rm -rf luarocks-${LUAROCKS_VER} + +RUN luarocks install busted +RUN luarocks install alt-getopt +RUN luarocks install moonscript + +FROM ubuntu:24.04 + +RUN apt-get update && \ + apt-get install -y jq && \ + rm -rf /var/lib/apt/lists/* && \ + apt-get purge --auto-remove && \ + apt-get clean + +COPY --from=builder /usr/local /usr/local + +COPY . /opt/test-runner WORKDIR /opt/test-runner -COPY . . -ENTRYPOINT ["/opt/test-runner/bin/run.sh"] +ENTRYPOINT ["/opt/test-runner/bin/run.moon"] diff --git a/README.md b/README.md index 6636269..937970e 100644 --- a/README.md +++ b/README.md @@ -2,24 +2,14 @@ The Docker image to automatically run tests on MoonScript solutions submitted to [Exercism]. -## Getting started - -Build the test runner, conforming to the [Test Runner interface specification](https://github.com/exercism/docs/blob/main/building/tooling/test-runners/interface.md). -Update the files to match your track's needs. At the very least, you'll need to update `bin/run.sh`, `Dockerfile` and the test solutions in the `tests` directory - -- Tip: look for `TODO:` comments to point you towards code that need updating -- Tip: look for `OPTIONAL:` comments to point you towards code that _could_ be useful -- Tip: if it proves impossible for the Docker image to work on a read-only filesystem, remove the `--read-only` flag from the `bin/run-in-docker.sh` and `bin/run-tests-in-docker.sh` files. - We don't yet enforce a read-only file system in production, but we might in the future! - ## Run the test runner To run the tests of a single solution, do the following: 1. Open a terminal in the project's root -2. Run `./bin/run.sh ` +2. Run `bin/run.moon ${exercise_slug} ${solution_dir} ${output_dir}` -Once the test runner has finished, its results will be written to `/results.json`. +Once the test runner has finished, its results will be written to `${output_dir}/results.json`. 
## Run the test runner on a solution using Docker @@ -28,9 +18,9 @@ _This script is provided for testing purposes, as it mimics how test runners run To run the tests of a single solution using the Docker image, do the following: 1. Open a terminal in the project's root -2. Run `./bin/run-in-docker.sh ` +2. Run `./bin/run-in-docker.sh ${exercise_slug} ${solution_dir} ${output_dir}` -Once the test runner has finished, its results will be written to `/results.json`. +Once the test runner has finished, its results will be written to `${output_dir}/results.json`. ## Run the tests @@ -39,9 +29,9 @@ To run the tests to verify the behavior of the test runner, do the following: 1. Open a terminal in the project's root 2. Run `./bin/run-tests.sh` -These are [golden tests][golden] that compare the `results.json` generated by running the current state of the code against the "known good" `tests//expected_results.json`. All files created during the test run itself are discarded. +These are [golden tests][golden] that compare the `results.json` generated by running the current state of the code against the "known good" `tests/${test_name}/expected_results.json`. All files created during the test run itself are discarded. -When you've made modifications to the code that will result in a new "golden" state, you'll need to update the affected `tests//expected_results.json` file(s). +When you've made modifications to the code that will result in a new "golden" state, you'll need to update the affected `tests/${test_name}/expected_results.json` file(s). ## Run the tests using Docker @@ -52,12 +42,14 @@ To run the tests to verify the behavior of the test runner using the Docker imag 1. Open a terminal in the project's root 2. Run `./bin/run-tests-in-docker.sh` -These are [golden tests][golden] that compare the `results.json` generated by running the current state of the code against the "known good" `tests//expected_results.json`. 
All files created during the test run itself are discarded. +These are [golden tests][golden] that compare the `results.json` generated by running the current state of the code against the "known good" `tests/${test_name}/expected_results.json`. All files created during the test run itself are discarded. -When you've made modifications to the code that will result in a new "golden" state, you'll need to update the affected `tests//expected_results.json` file(s). +When you've made modifications to the code that will result in a new "golden" state, you'll need to update the affected `tests/${test_name}/expected_results.json` file(s). ## Benchmarking +**_NOTE: not implemented_** + There are two scripts you can use to benchmark the test runner: 1. `./bin/benchmark.sh`: benchmark the test runner code diff --git a/bin/benchmark-in-docker.sh b/bin/benchmark-in-docker.sh old mode 100755 new mode 100644 diff --git a/bin/benchmark.sh b/bin/benchmark.sh old mode 100755 new mode 100644 diff --git a/bin/run-in-docker.sh b/bin/run-in-docker.sh index 3672f7b..5c47d02 100755 --- a/bin/run-in-docker.sh +++ b/bin/run-in-docker.sh @@ -1,4 +1,5 @@ #!/usr/bin/env sh +set -e # Synopsis: # Run the test runner on a solution using the test runner Docker image. @@ -6,22 +7,19 @@ # Arguments: # $1: exercise slug -# $2: path to solution folder -# $3: path to output directory +# $2: absolute path to solution folder +# $3: absolute path to output directory # Output: # Writes the test results to a results.json file in the passed-in output directory. 
# The test results are formatted according to the specifications at https://github.com/exercism/docs/blob/main/building/tooling/test-runners/interface.md # Example: -# ./bin/run-in-docker.sh two-fer path/to/solution/folder/ path/to/output/directory/ - -# Stop executing when a command returns a non-zero return code -set -e +# ./bin/run-in-docker.sh two-fer /absolute/path/to/two-fer/solution/folder/ /absolute/path/to/output/directory/ # If any required arguments is missing, print the usage and exit if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then - echo "usage: ./bin/run-in-docker.sh exercise-slug path/to/solution/folder/ path/to/output/directory/" + echo "usage: $0 exercise-slug /absolute/path/to/solution/folder/ /absolute/path/to/output/directory/" exit 1 fi @@ -43,4 +41,4 @@ docker run \ --mount type=bind,src="${solution_dir}",dst=/solution \ --mount type=bind,src="${output_dir}",dst=/output \ --mount type=tmpfs,dst=/tmp \ - exercism/moonscript-test-runner "${slug}" /solution /output + exercism/moonscript-test-runner "${slug}" /solution /output diff --git a/bin/run-tests-in-docker.sh b/bin/run-tests-in-docker.sh index 1d4dcf9..1c38f02 100755 --- a/bin/run-tests-in-docker.sh +++ b/bin/run-tests-in-docker.sh @@ -1,4 +1,5 @@ #!/usr/bin/env sh +set -e # Synopsis: # Test the test runner Docker image by running it against a predefined set of @@ -12,9 +13,6 @@ # Example: # ./bin/run-tests-in-docker.sh -# Stop executing when a command returns a non-zero return code -set -e - # Build the Docker image docker build --rm -t exercism/moonscript-test-runner . diff --git a/bin/run-tests.sh b/bin/run-tests.sh index 2bb47b8..a4b7c07 100755 --- a/bin/run-tests.sh +++ b/bin/run-tests.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env sh +#!/usr/bin/env bash # Synopsis: # Test the test runner by running it against a predefined set of solutions @@ -8,30 +8,20 @@ # Outputs the diff of the expected test results against the actual test results # generated by the test runner. 
+# Exit status: the number of failed tests + # Example: # ./bin/run-tests.sh exit_code=0 -# Iterate over all test directories for test_dir in tests/*; do - test_dir_name=$(basename "${test_dir}") - test_dir_path=$(realpath "${test_dir}") - - bin/run.sh "${test_dir_name}" "${test_dir_path}" "${test_dir_path}" - - # OPTIONAL: Normalize the results file - # If the results.json file contains information that changes between - # different test runs (e.g. timing information or paths), you should normalize - # the results file to allow the diff comparison below to work as expected - - file="results.json" - expected_file="expected_${file}" - echo "${test_dir_name}: comparing ${file} to ${expected_file}" + test_name=$(basename "${test_dir}") + test_path=$(realpath "${test_dir}") - if ! diff "${test_dir_path}/${file}" "${test_dir_path}/${expected_file}"; then - exit_code=1 - fi + bin/run.moon "${test_name}" "${test_path}" "${test_path}" \ + && bin/test-result-compare.lua "${test_dir}/results.json" "${test_dir}/expected_results.json" \ + || (( ++exit_code )) done exit ${exit_code} diff --git a/bin/run.moon b/bin/run.moon new file mode 100755 index 0000000..8d78bb8 --- /dev/null +++ b/bin/run.moon @@ -0,0 +1,192 @@ +#! /usr/bin/env moon + +require 'moonscript' +lfs = require 'lfs' +json = (require 'dkjson').use_lpeg! +getopt = require 'alt_getopt' +local verbose + +import p from require 'moon' + + +-- ----------------------------------------------------------- +show_help = (args) -> + print "Usage: #{args[0]} [-h] [-v] slug solution-dir output-dir" + print "Where: -h show this help" + print " -v verbose: show the output JSON" + os.exit! + + +-- ----------------------------------------------------------- +file_exists = (path) -> + attrs = lfs.attributes path + not not attrs + +is_directory = (path) -> + attrs = lfs.attributes path + attrs and attrs.mode == 'directory' + +realpath = (path) -> + fh = io.popen "realpath #{path}" + dir = fh\read! + fh\close! 
+ dir + +validate = (args) -> + show_help args unless #args == 3 + {slug, src_dir, dest_dir} = args + assert slug != '', 'First arg, the slug, cannot be empty' + assert is_directory(src_dir), 'Second arg, the solution directory, must be a directory' + assert is_directory(dest_dir), 'Third arg, the output directory, must be a directory' + + slug, realpath(src_dir), realpath(dest_dir) + + +-- ----------------------------------------------------------- +run_tests = (slug, dir) -> + ok, err = lfs.chdir dir + assert ok, err + + -- unskip tests + cmd = "perl -i.bak -pe 's{^\\s*\\Kpending\\b}{it}' *_spec.moon" + ok, result_type, status = os.execute cmd + assert ok + + -- launch `busted` + fh = io.popen 'busted -o json', 'r' + json_output = fh\read 'a' + ok, exit_type, exit_status = fh\close! + + if exit_type == 'signal' + return { + status: 'error', + message: json_output + } + + data = json.decode json_output + + if not data + return { + status: 'error', + message: json_output + } + + if exit_status != 0 and #data.successes == 0 and #data.failures == 0 and #data.errors > 0 + return { + status: 'error', + message: data.errors[1].message + } + + results = {} + + for test in *data.successes + results[test.element.name] = { + status: 'pass', + name: test.element.name, + } + + for test in *data.failures + results[test.element.name] = { + status: 'fail', + name: test.element.name, + message: test.trace.message, + } + + results + + +-- ----------------------------------------------------------- +get_test_bodies = (slug, dir) -> + ok, err = lfs.chdir dir + assert ok, err + + order = {} + bodies = {} + + test_file = "#{slug\gsub('-', '_')}_spec.moon" + return unless file_exists test_file -- let `busted` handle the error messaging + + fh = io.open test_file, 'r' + + pattern = (word) -> '^%s+' .. word .. '%s+[\'"](.+)[\'"],%s+->' + patterns = it: pattern('it'), pending: pattern('pending') + + local test_name + test_body = {} + in_test = false + + for line in fh\lines! 
+ if line\match '^%s+describe ' + if test_name + bodies[test_name] = table.concat test_body, '\n' + test_body = {} + test_name = nil + in_test = false + + m = line\match(patterns.it) or line\match(patterns.pending) + if not m + if in_test + table.insert test_body, line + else + table.insert order, m + if in_test + bodies[test_name] = table.concat test_body, '\n' + test_body = {} + test_name = m + in_test = true + + fh\close! + bodies[test_name] = table.concat test_body, '\n' + order, bodies + + +-- ----------------------------------------------------------- +write_results = (slug, test_results, names, bodies, dir) -> + ok, err = lfs.chdir dir + assert ok, "#{err}: #{dir}" + + results = version: 2, status: nil, tests: {} + + if test_results.status + -- this was an error result + results.status = test_results.status + results.message = test_results.message + + else + status = 'pass' + for name in *names + test = test_results[name] + assert test, "no test result for #{name}" + status = 'fail' if test.status == 'fail' + test.test_code = bodies[name] + table.insert results.tests, test + results.status = status + + fh = io.open 'results.json', 'w' + fh\write (json.encode results) .. '\n' + fh\close! + + os.execute "jq . results.json" if verbose + + +-- ----------------------------------------------------------- +main = (args) -> + opts, optind = getopt.get_opts args, 'hv', {} + + show_help args if opts.h + verbose = not not opts.v + table.remove args, 1 for _ = 1, optind - 1 + + slug, src_dir, dest_dir = validate args + + print "#{slug}: testing ..." + + test_names_ordered, test_code = get_test_bodies slug, src_dir + + test_results = run_tests slug, src_dir + + write_results slug, test_results, test_names_ordered, test_code, dest_dir + + print "#{slug}: ... 
done" + +main arg diff --git a/bin/run.sh b/bin/run.sh deleted file mode 100755 index f1e8b78..0000000 --- a/bin/run.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env sh - -# Synopsis: -# Run the test runner on a solution. - -# Arguments: -# $1: exercise slug -# $2: path to solution folder -# $3: path to output directory - -# Output: -# Writes the test results to a results.json file in the passed-in output directory. -# The test results are formatted according to the specifications at https://github.com/exercism/docs/blob/main/building/tooling/test-runners/interface.md - -# Example: -# ./bin/run.sh two-fer path/to/solution/folder/ path/to/output/directory/ - -# If any required arguments is missing, print the usage and exit -if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then - echo "usage: ./bin/run.sh exercise-slug path/to/solution/folder/ path/to/output/directory/" - exit 1 -fi - -slug="$1" -solution_dir=$(realpath "${2%/}") -output_dir=$(realpath "${3%/}") -results_file="${output_dir}/results.json" - -# Create the output directory if it doesn't exist -mkdir -p "${output_dir}" - -echo "${slug}: testing..." - -# Run the tests for the provided implementation file and redirect stdout and -# stderr to capture it -test_output=$(false) -# TODO: substitute "false" with the actual command to run the test: -# test_output=$(command_to_run_tests 2>&1) - -# Write the results.json file based on the exit code of the command that was -# just executed that tested the implementation file -if [ $? 
-eq 0 ]; then - jq -n '{version: 1, status: "pass"}' > ${results_file} -else - # OPTIONAL: Sanitize the output - # In some cases, the test output might be overly verbose, in which case stripping - # the unneeded information can be very helpful to the student - # sanitized_test_output=$(printf "${test_output}" | sed -n '/Test results:/,$p') - - # OPTIONAL: Manually add colors to the output to help scanning the output for errors - # If the test output does not contain colors to help identify failing (or passing) - # tests, it can be helpful to manually add colors to the output - # colorized_test_output=$(echo "${test_output}" \ - # | GREP_COLOR='01;31' grep --color=always -E -e '^(ERROR:.*|.*failed)$|$' \ - # | GREP_COLOR='01;32' grep --color=always -E -e '^.*passed$|$') - - jq -n --arg output "${test_output}" '{version: 1, status: "fail", message: $output}' > ${results_file} -fi - -echo "${slug}: done" diff --git a/bin/test-result-compare.lua b/bin/test-result-compare.lua new file mode 100755 index 0000000..2dd8c60 --- /dev/null +++ b/bin/test-result-compare.lua @@ -0,0 +1,25 @@ +#!/usr/bin/env lua + +local json = require('dkjson') +local tablex = require('pl.tablex') +local pretty = require('pl.pretty') + +local function load_json(file) + local fd = io.open(file) + return json.decode(fd:read('a')) +end + +local result = load_json(arg[1]) +local expected = load_json(arg[2]) + +if not tablex.deepcompare(result, expected) then + print('\n========[ RESULT ]========\n') + print(pretty.write(result)) + + print('\n========[ EXPECTED ]========\n') + print(pretty.write(expected)) + + os.exit(1) +end + +os.exit(0) diff --git a/bin/validate-track-in-docker.sh b/bin/validate-track-in-docker.sh new file mode 100755 index 0000000..f2c4b7b --- /dev/null +++ b/bin/validate-track-in-docker.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env sh + +# Synopsis: +# Test the test runner Docker image by running it against the example solutions +# from the language track. 
The test runner Docker image is built +# automatically. The track is cloned from git into a subdirectory. + +# Output: +# The actual test results generated by the test runner Docker image. + +# Example: +# ./bin/validate-track-in-docker.sh + +# Stop executing when a command returns a non-zero return code +set -e + +# Fetch the moonscript track +rm -rf track/ +git clone --depth 1 https://github.com/exercism/moonscript.git track + +# Build the Docker image +docker build --rm -t exercism/moonscript-test-runner . + +# Run the Docker image using the settings mimicking the production environment +docker run \ + --rm \ + --network none \ + --read-only \ + --mount type=bind,src="${PWD}/track",dst=/opt/test-runner/track \ + --mount type=tmpfs,dst=/tmp \ + --workdir /opt/test-runner \ + --entrypoint /opt/test-runner/track/bin/verify-exercises \ + exercism/moonscript-test-runner diff --git a/tests/.gitignore b/tests/.gitignore new file mode 100644 index 0000000..e345bae --- /dev/null +++ b/tests/.gitignore @@ -0,0 +1,2 @@ +*.bak +results.json diff --git a/tests/all-fail/expected_results.json b/tests/all-fail/expected_results.json deleted file mode 100644 index 9ef8b6f..0000000 --- a/tests/all-fail/expected_results.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "version": 1, - "status": "fail", - "message": "TODO: replace with correct output" -} diff --git a/tests/empty-file/expected_results.json b/tests/empty-file/expected_results.json deleted file mode 100644 index 9ef8b6f..0000000 --- a/tests/empty-file/expected_results.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "version": 1, - "status": "fail", - "message": "TODO: replace with correct output" -} diff --git a/tests/example-all-fail/.busted b/tests/example-all-fail/.busted new file mode 100644 index 0000000..86b84e7 --- /dev/null +++ b/tests/example-all-fail/.busted @@ -0,0 +1,5 @@ +return { + default = { + ROOT = { '.' 
} + } +} diff --git a/tests/example-all-fail/example_all_fail.moon b/tests/example-all-fail/example_all_fail.moon new file mode 100644 index 0000000..a8420de --- /dev/null +++ b/tests/example-all-fail/example_all_fail.moon @@ -0,0 +1,4 @@ +leap_year = (number) -> + number % 2 == 1 + +leap_year diff --git a/tests/example-all-fail/example_all_fail_spec.moon b/tests/example-all-fail/example_all_fail_spec.moon new file mode 100644 index 0000000..98e9954 --- /dev/null +++ b/tests/example-all-fail/example_all_fail_spec.moon @@ -0,0 +1,8 @@ +is_leap_year = require 'example_all_fail' + +describe 'leap', -> + it 'a known leap year', -> + assert.is_true is_leap_year 1996 + + it 'any old year', -> + assert.is_false is_leap_year 1997 diff --git a/tests/example-all-fail/expected_results.json b/tests/example-all-fail/expected_results.json new file mode 100644 index 0000000..b9866f8 --- /dev/null +++ b/tests/example-all-fail/expected_results.json @@ -0,0 +1 @@ +{"tests":[{"message":"Expected objects to be the same.\nPassed in:\n(boolean) false\nExpected:\n(boolean) true","test_code":" assert.is_true is_leap_year 1996\n","name":"a known leap year","status":"fail"},{"message":"Expected objects to be the same.\nPassed in:\n(boolean) true\nExpected:\n(boolean) false","test_code":" assert.is_false is_leap_year 1997","name":"any old year","status":"fail"}],"version":2,"status":"fail"} diff --git a/tests/example-empty-file/.busted b/tests/example-empty-file/.busted new file mode 100644 index 0000000..86b84e7 --- /dev/null +++ b/tests/example-empty-file/.busted @@ -0,0 +1,5 @@ +return { + default = { + ROOT = { '.' 
} + } +} diff --git a/tests/example-empty-file/example_empty_file.moon b/tests/example-empty-file/example_empty_file.moon new file mode 100644 index 0000000..e69de29 diff --git a/tests/example-empty-file/example_empty_file_spec.moon b/tests/example-empty-file/example_empty_file_spec.moon new file mode 100644 index 0000000..aa8e1f9 --- /dev/null +++ b/tests/example-empty-file/example_empty_file_spec.moon @@ -0,0 +1,14 @@ +is_leap_year = require 'example_empty_file' + +describe 'leap', -> + it 'a known leap year', -> + assert.is_true is_leap_year 1996 + + it 'any old year', -> + assert.is_false is_leap_year 1997 + + it 'turn of the 20th century', -> + assert.is_false is_leap_year 1900 + + it 'turn of the 21st century', -> + assert.is_true is_leap_year 2400 diff --git a/tests/example-empty-file/expected_results.json b/tests/example-empty-file/expected_results.json new file mode 100644 index 0000000..a15497a --- /dev/null +++ b/tests/example-empty-file/expected_results.json @@ -0,0 +1 @@ +{"status":"error","tests":[],"message":"./example_empty_file_spec.moon:5: attempt to call a boolean value (upvalue 'is_leap_year')","version":2} diff --git a/tests/example-partial-fail/.busted b/tests/example-partial-fail/.busted new file mode 100644 index 0000000..86b84e7 --- /dev/null +++ b/tests/example-partial-fail/.busted @@ -0,0 +1,5 @@ +return { + default = { + ROOT = { '.' 
} + } +} diff --git a/tests/example-partial-fail/example_partial_fail.moon b/tests/example-partial-fail/example_partial_fail.moon new file mode 100644 index 0000000..c338f3e --- /dev/null +++ b/tests/example-partial-fail/example_partial_fail.moon @@ -0,0 +1,7 @@ +leap_year = (number) -> + is_divisible_by = (a_number) -> + number % a_number == 0 + + is_divisible_by(401) or (is_divisible_by(4) and (not is_divisible_by(100))) + +leap_year diff --git a/tests/example-partial-fail/example_partial_fail_spec.moon b/tests/example-partial-fail/example_partial_fail_spec.moon new file mode 100644 index 0000000..02de8d1 --- /dev/null +++ b/tests/example-partial-fail/example_partial_fail_spec.moon @@ -0,0 +1,14 @@ +is_leap_year = require 'example_partial_fail' + +describe 'leap', -> + it 'a known leap year', -> + assert.is_true is_leap_year 1996 + + it 'any old year', -> + assert.is_false is_leap_year 1997 + + it 'turn of the 20th century', -> + assert.is_false is_leap_year 1900 + + it 'turn of the 21st century', -> + assert.is_true is_leap_year 2400 diff --git a/tests/example-partial-fail/expected_results.json b/tests/example-partial-fail/expected_results.json new file mode 100644 index 0000000..09dee9e --- /dev/null +++ b/tests/example-partial-fail/expected_results.json @@ -0,0 +1 @@ +{"status":"fail","tests":[{"test_code":" assert.is_true is_leap_year 1996\n","status":"pass","name":"a known leap year"},{"test_code":" assert.is_false is_leap_year 1997\n","status":"pass","name":"any old year"},{"test_code":" assert.is_false is_leap_year 1900\n","status":"pass","name":"turn of the 20th century"},{"test_code":" assert.is_true is_leap_year 2400","message":"Expected objects to be the same.\nPassed in:\n(boolean) false\nExpected:\n(boolean) true","name":"turn of the 21st century","status":"fail"}],"version":2} diff --git a/tests/example-success/.busted b/tests/example-success/.busted new file mode 100644 index 0000000..86b84e7 --- /dev/null +++ b/tests/example-success/.busted @@ 
-0,0 +1,5 @@ +return { + default = { + ROOT = { '.' } + } +} diff --git a/tests/example-success/example_success.moon b/tests/example-success/example_success.moon new file mode 100644 index 0000000..47d7028 --- /dev/null +++ b/tests/example-success/example_success.moon @@ -0,0 +1,7 @@ +leap_year = (number) -> + is_divisible_by = (a_number) -> + number % a_number == 0 + + is_divisible_by(400) or (is_divisible_by(4) and (not is_divisible_by(100))) + +leap_year diff --git a/tests/example-success/example_success_spec.moon b/tests/example-success/example_success_spec.moon new file mode 100644 index 0000000..8127303 --- /dev/null +++ b/tests/example-success/example_success_spec.moon @@ -0,0 +1,26 @@ +is_leap_year = require 'example_success' + +describe 'leap', -> + it 'a known leap year', -> + assert.is_true is_leap_year 1996 + + it 'any old year', -> + assert.is_false is_leap_year 1997 + + it 'turn of the 20th century', -> + assert.is_false is_leap_year 1900 + + it 'turn of the 21st century', -> + assert.is_true is_leap_year 2400 + + it 'handles tests with parens in strings', -> + s = ')' + assert.is_true true + + it "handles test names with 'apostrophes'", -> + assert.is_true true + + it 'handles tests with multiple lines in the body', -> + assert.is_false false + + assert.is_true true diff --git a/tests/example-success/expected_results.json b/tests/example-success/expected_results.json new file mode 100644 index 0000000..be1a520 --- /dev/null +++ b/tests/example-success/expected_results.json @@ -0,0 +1 @@ +{"tests":[{"name":"a known leap year","status":"pass","test_code":" assert.is_true is_leap_year 1996\n"},{"name":"any old year","status":"pass","test_code":" assert.is_false is_leap_year 1997\n"},{"name":"turn of the 20th century","status":"pass","test_code":" assert.is_false is_leap_year 1900\n"},{"name":"turn of the 21st century","status":"pass","test_code":" assert.is_true is_leap_year 2400\n"},{"name":"handles tests with parens in 
strings","status":"pass","test_code":" s = ')'\n assert.is_true true\n"},{"name":"handles test names with 'apostrophes'","status":"pass","test_code":" assert.is_true true\n"},{"name":"handles tests with multiple lines in the body","status":"pass","test_code":" assert.is_false false\n\n assert.is_true true"}],"status":"pass","version":2} diff --git a/tests/example-syntax-error/.busted b/tests/example-syntax-error/.busted new file mode 100644 index 0000000..86b84e7 --- /dev/null +++ b/tests/example-syntax-error/.busted @@ -0,0 +1,5 @@ +return { + default = { + ROOT = { '.' } + } +} diff --git a/tests/example-syntax-error/example_syntax_error.moon b/tests/example-syntax-error/example_syntax_error.moon new file mode 100644 index 0000000..9f83213 --- /dev/null +++ b/tests/example-syntax-error/example_syntax_error.moon @@ -0,0 +1 @@ +leap_year = diff --git a/tests/example-syntax-error/example_syntax_error_spec.moon b/tests/example-syntax-error/example_syntax_error_spec.moon new file mode 100644 index 0000000..cc23079 --- /dev/null +++ b/tests/example-syntax-error/example_syntax_error_spec.moon @@ -0,0 +1,8 @@ +is_leap_year = require 'example_syntax_error' + +describe 'leap', -> + it 'a known leap year', -> + assert.is_true is_leap_year 1996 + + it 'any old year', -> + assert.is_false is_leap_year 1997 diff --git a/tests/example-syntax-error/expected_results.json b/tests/example-syntax-error/expected_results.json new file mode 100644 index 0000000..be52766 --- /dev/null +++ b/tests/example-syntax-error/expected_results.json @@ -0,0 +1 @@ +{"tests":[],"version":2,"message":"Failed to encode test results to json: type 'function' is not supported by JSON.\n","status":"error"} diff --git a/tests/partial-fail/expected_results.json b/tests/partial-fail/expected_results.json deleted file mode 100644 index 9ef8b6f..0000000 --- a/tests/partial-fail/expected_results.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "version": 1, - "status": "fail", - "message": "TODO: replace with correct 
output" -} diff --git a/tests/success/expected_results.json b/tests/success/expected_results.json deleted file mode 100644 index 6c2223e..0000000 --- a/tests/success/expected_results.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "version": 1, - "status": "pass" -} diff --git a/tests/syntax-error/expected_results.json b/tests/syntax-error/expected_results.json deleted file mode 100644 index afc4d4e..0000000 --- a/tests/syntax-error/expected_results.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "version": 1, - "status": "error", - "message": "TODO: replace with correct output" -}