Example — Python Standalone
An example of a simple standalone Python application.
Exercise:
Take a user input ended with a newline character, up to 255 characters long, and echo it back on the next line.
Example solution:
# Take a user input ended in a new line character, up to 255 characters long, and echo it back to the next line
def main():
    """Read a single line from stdin and echo it back to stdout.

    ``input()`` strips the trailing newline; ``print`` adds one back,
    so the echoed line matches the input exactly.
    """
    a = input()
    print(a)


if __name__ == "__main__":
    main()
Example tester:
from py_eval_util import Evaluator, levenshtein_ratio
def test_file():
    """Drive the Evaluator over a set of echo test cases.

    Each test case is fed to the submission via stdin; the submission is
    expected to read it once and print it back unchanged.
    """
    test_cases = [
        "foo",
        "bar",
        "Hello World",
        "a" * 254,
    ]

    # For a given input index `i` and the submission's stdout lines
    # `result`, return a (score, feedback) pair.
    def combined_provider(i, result):
        score = 0
        # A variable named consumed_input is injected when run_module is selected.
        # It holds a list of all the lines read from stdin.
        # NOTE(review): consumed_input is not defined here — it is assumed to be
        # injected into this module's globals by py_eval_util; confirm against
        # that library's documentation.
        if len(consumed_input) == 0:  # No lines read from consumed_input
            feedback = "You didn't read in anything"
        elif len(result) == 0:
            if consumed_input[0] == test_cases[i]:
                # Input was consumed but nothing was echoed: partial credit.
                feedback = "You've read the input, however failed to print anything"
                score = 0.2
            else:
                feedback = "Nothing was printed"
        elif consumed_input.too_many_reads:
            # too_many_reads is set when the tested module tried to read more
            # times than there were input lines supplied.
            feedback = "You've tried to get inputs too many times"
        else:
            line = result[0]
            # Fuzzy-compare the printed line against the expected one.
            score = levenshtein_ratio(line, test_cases[i])
            if score >= 1:
                feedback = f"You've printed '{test_cases[i]}' correctly"
            else:
                feedback = f"Expected: '{test_cases[i]}' printed: '{line}'"
            if len(result) > 1:
                # Penalize extra output beyond the single expected line.
                score -= 0.5
                feedback += ". You've printed more than one thing."
        # Applied to every branch: mark the case as PASS or FAIL.
        feedback += " : PASS!" if score >= 1 else " : FAIL!"
        return score, feedback

    e = Evaluator()
    # Set the name for the question. Evaluator will start calling at i = 0 and increase it.
    # If None is returned, that marks the limit to the number of questions.
    e.with_name(
        lambda i: f"Basic input: {test_cases[i]}" if i < len(test_cases) else None)
    # Select to just import a module as a __main__
    e.run_module()
    # Set the console input (stdin) for a given test case
    e.with_input(
        lambda i: test_cases[i])
    # Let the score and feedback provider decide the score and the feedback
    e.with_score_and_feedback(combined_provider)
    # Run the evaluator
    e.start()


if __name__ == "__main__":
    test_file()
Example config file:
{
"tests": [
{
"type": "syntax",
"max_score": 10.0,
"number": "1",
"tags": [],
"visibility": "visible"
},
{
"type": "functionality",
"max_score": 70.0,
"number": "2",
"tags": [],
"visibility": "visible",
"tester_file": "path/to/tester.py"
},
{
"type": "comments",
"max_score": 10.0,
"number": "3",
"tags": [],
"visibility": "visible"
},
{
"type": "static_analysis",
"max_score": 10.0,
"number": "4",
"tags": [],
"visibility": "visible"
}
]
}