Skip to content

Commit

Permalink
change structure of tasks
Browse files Browse the repository at this point in the history
  • Loading branch information
bastinjul committed Sep 6, 2019
1 parent 33dcdae commit d521735
Show file tree
Hide file tree
Showing 4,232 changed files with 103,235 additions and 4,158 deletions.
The diff you're trying to view is too large. We only load the first 3000 changed files.
File renamed without changes.
45 changes: 45 additions & 0 deletions $common/fragments/config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
import yaml
from pathlib import Path


# Using a yaml file, we can extract the kind of exercise/feedback
def config_file_to_dict(file_path):
    """Load the feedback settings YAML file, merged over the defaults.

    Args:
        file_path: path (str or Path) to the YAML configuration file.

    Returns:
        dict: the default settings overridden by any keys present in the
        file. The nested "status_message" dict is merged key-by-key so a
        partial override in the file keeps the remaining default messages.
    """
    # Check github wiki for more information
    default_values = {
        "has_feedback": False,
        "quorum": 1.0,
        "feedback_kind": None,
        "coverage_stats": None,
        "prohibited": {},
        "plagiarism": False,
        "external_libraries": None,
        "status_message": {
            0: "Your code has successfully passed all tests for this mission.",
            1: "Your code failed all tests for this mission.",
            2: "You used prohibited instructions (such as System.exit) : read carefully the assignment.",
            3: "Your tests don't cover all cases.",
            252: "The memory limit of your program is exceeded.",
            253: "The time limit for running your program has been exceeded."
        }
    }

    # no config file so use basic settings
    if not Path(file_path).exists():
        return default_values

    with open(file_path, "r") as stream:
        # safe_load instead of load: yaml.load without an explicit Loader is
        # deprecated (PyYAML >= 5.1) and can execute arbitrary code when the
        # YAML content is not trusted.
        load_config = yaml.safe_load(stream)

    # Empty file: yaml returns None, keep the defaults
    if load_config is None:
        return default_values

    # Merge dictionaries
    # The ** operator doesn't deep-merge the nested "status_message"
    # dictionary, so merge that one explicitly first
    load_config["status_message"] = {
        **default_values["status_message"],
        **load_config.get("status_message", {})
    }

    return {**default_values, **load_config}
57 changes: 57 additions & 0 deletions $common/fragments/constants.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
from pathlib import Path

#####################################
# CONSTANTS #
#####################################
# Paths and settings shared by the Java task run scripts.

# Current path of the run script of the task (/task)
CWD = Path.cwd()

# File extension of the student source files
FILE_EXTENSION = ".java"

# Our code
# For Python 3.5 , we cannot use Path like object directly yet ( fixed in 3.6)
# we have to do : str(PathLibObject)

# Where we keep the good and wrong implementations
PATH_FLAVOUR = str(CWD / "flavour")

# Our templates
PATH_TEMPLATES = str(CWD / "templates")

# Source code to be tested
PATH_SRC = str(CWD / "src")

# .class storage path (javac output directory)
PATH_CLASSES = str(CWD / "classes")

# Test runner
RUNNER_PATH = str(CWD / "src" / "StudentTestRunner.java")

# Runner name as expected from java (Java wants a package name and not the full path so)
# NOTE(review): this keeps the ".java" suffix (e.g. "src.StudentTestRunner.java");
# presumably the extension is stripped or tolerated by the caller -- TODO confirm
RUNNER_JAVA_NAME = str(Path(RUNNER_PATH).relative_to(CWD)).replace("/", ".")

# Config file to generate feedback for every kind of exercises
FEEDBACK_REVIEW_PATH = str(CWD / "feedback_settings.yaml")

# JaCoCo needs a jar to execute its stuff
JAR_FILE = str(CWD / "task_evaluation.jar")

# Manifest for JAR FILE (since it ignores -cp option)
MANIFEST_FILE = str(CWD / "MANIFEST.MF")

# JaCoCo coverage file path
JACOCO_EXEC_FILE = str(CWD / "jacoco.exec")

# JaCoCo classfiles for report ( only take the useful one in flavour)
JACOCO_CLASS_FILES = [str(Path(PATH_CLASSES) / "flavour")]

# JaCoCo result file in xml
JACOCO_RESULT_FILE = str(CWD / "coverage_result.xml")

# Libraries folder
LIBS_FOLDER = "/course/common/libs"

# Default Libraries used in the runscript ( stored in LIBS_FOLDER )
DEFAULT_LIBRARIES = ["junit-4.12.jar", "hamcrest-core-1.3.jar", "JavaGrading.jar"]
31 changes: 31 additions & 0 deletions $common/fragments/coverage.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
from xml.etree import ElementTree as ET
from fragments.constants import *


# https://docs.python.org/3/library/xml.etree.elementtree.html
# Extract the stats given by Jacoco into a list so that we can use that later
def extract_stats(path_to_xml_file=JACOCO_RESULT_FILE):
    """Return the top-level JaCoCo counters as a list of dicts.

    Each entry carries the integer "covered" / "missed" values and the
    "type" attribute of a <counter> element found directly under the
    report root.
    """
    report_root = ET.parse(path_to_xml_file).getroot()
    stats = []
    for counter in report_root.findall("./counter"):
        stats.append({
            "covered": int(counter.get("covered")),
            "missed": int(counter.get("missed")),
            "type": counter.get("type"),
        })
    return stats


# Command to generate the result as a xml file from JaCoCo
# https://stackoverflow.com/questions/47717538/usage-of-jacococli-jar-with-multiple-classfiles
def generate_coverage_report(exec_file=JACOCO_EXEC_FILE,
                             classes_path=JACOCO_CLASS_FILES,
                             xml_output=JACOCO_RESULT_FILE):
    """Build the shell command that turns a JaCoCo .exec file into an XML report.

    One "--classfiles" option is emitted per entry of *classes_path*.
    """
    jacoco_cli = str(Path(LIBS_FOLDER) / "jacococli.jar")
    class_files_options = ' '.join("--classfiles {}".format(str(entry)) for entry in classes_path)
    return "{} -jar {} report {} {} --xml {}".format(
        "java",
        jacoco_cli,
        exec_file,
        class_files_options,
        xml_output
    )
77 changes: 77 additions & 0 deletions $common/fragments/extraction.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
import re

from fragments import coverage, helper


# Extract score and message that use JavaGrading
def extract_java_grading_result(result, feedback_settings):
    """Parse JavaGrading output into a score ratio and a student message.

    Args:
        result: completed-process object exposing ``stdout`` (str) and
            ``returncode`` (int).
        feedback_settings: settings dict whose "status_message" maps
            return codes to fallback messages.

    Returns:
        tuple (score_ratio, message). score_ratio is student/total over
        all "**TOTAL**" lines found; 0.0 when none were found or the
        total is zero. The message is the cleaned JavaGrading output,
        unless prohibited instructions were used (returncode 2), in
        which case the status message for that return code is used.
    """
    # we have a feedback from JavaGrading
    # Fetch total for INGInious

    # WARNING : there could be multiple TOTAL in the stdout
    # So we must merge everything

    # Strips not useful things of JavaGrading

    # Display array of test suite results only if student didn't use prohib' instruction
    display = result.returncode != 2

    result_string = result.stdout.replace("--- GRADE ---", "").replace("--- END GRADE ---", "")
    # Raw strings: "\d" / "\*" inside plain literals are invalid escape
    # sequences (DeprecationWarning, SyntaxError in future Python versions)
    regex_strip = r"TOTAL \d*[.]?\d*\/\d*[.]?\d*"
    regex_strip2 = r"TOTAL WITHOUT IGNORED \d*[.]?\d*\/\d*[.]?\d*"

    # Remove match
    result_string = re.sub(regex_strip, '', result_string)
    result_string = re.sub(regex_strip2, '', result_string)

    regex = r'\*\*TOTAL\*\*",,\*\*(\d*[.]?\d*\/\d*[.]?\d*)\*\*'
    matches = re.findall(regex, result.stdout)

    # convert everything in float
    converted_results = [
        [
            float(item)
            for item in match.split("/")
        ]
        for match in matches
    ]

    # Guard: no TOTAL line at all (e.g. the runner crashed before grading).
    # Without this, zip(*[]) unpacking raises ValueError.
    if not converted_results:
        return 0.0, feedback_settings["status_message"].get(result.returncode, "Uncommon Failure")

    student_result, total_result = [sum(i) for i in zip(*converted_results)]

    # Guard against a zero denominator reported by the grader
    score_ratio = student_result / total_result if total_result else 0.0
    message = result_string if display \
        else feedback_settings["status_message"].get(result.returncode, "Uncommon Failure")
    return score_ratio, message


# Extract result from JaCoCo
def extract_jacoco_result(feedback_settings):
    """Compute (score_ratio, message) from the JaCoCo coverage report.

    Only the counter types listed in feedback_settings["coverage_stats"]
    are taken into account; a zero score is returned when none are given.
    """
    coverage_stats = feedback_settings["coverage_stats"]

    # No coverage stats , cannot evaluate this
    if not coverage_stats:
        return 0.0, "NO COVERAGE CRITERIA WERE GIVEN"

    # Generate the xml report file
    gen_report = coverage.generate_coverage_report()
    print("GENERATING THE EXEC FILE : {}".format(gen_report))
    helper.run_command(gen_report)

    # Keep only the counter types this task cares about
    coverage_result = coverage.extract_stats()
    filtered_coverage_result = [entry for entry in coverage_result if entry["type"] in coverage_stats]
    print(filtered_coverage_result)

    # Aggregate covered/missed over the selected counters
    covered = 0
    missed = 0
    for entry in filtered_coverage_result:
        covered += entry["covered"]
        missed += entry["missed"]
    total = covered + missed

    # One "TYPE:\tcovered/total" line per counter for the student
    report_lines = []
    for entry in filtered_coverage_result:
        report_lines.append("{}:\t{}/{}".format(entry["type"], entry["covered"], entry["covered"] + entry["missed"]))
    msg = '\n'.join(report_lines)

    # For security (if report gives 0 at total, don't try to apply division)
    ratio = covered / total if total > 0 else 0.0

    return ratio, msg
72 changes: 72 additions & 0 deletions $common/fragments/feedback.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
import sys

from inginious import feedback, rst

from fragments import helper
from fragments.extraction import *


# Throw a fatal error if the given code doesn't compile
def compilation_feedback(result):
    """Abort the run script with a zero grade when compilation failed.

    Does nothing when result.returncode is 0; otherwise publishes a
    failure message/grade to INGInious and exits the process.
    """
    if result.returncode == 0:
        return
    # Compiler output goes to the task log for the teacher
    print(result.stderr)
    feedback.set_global_feedback("Your file did not compile : please don't use INGINIOUS as an IDE ...")
    feedback.set_global_result("failed")
    feedback.set_grade(0.0)
    sys.exit(0)


# Generate the final message(s) to student
def result_feedback(result, feedback_settings):
    """Publish the grade and the feedback message(s) for this submission.

    Dispatches on feedback_settings["feedback_kind"] ("JavaGrading" or
    "JaCoCo") when has_feedback is set; otherwise grades binary 0/100
    from the return code.
    """
    status_messages = feedback_settings["status_message"]
    # Top level message keyed on the process return code
    msg = "{}\n".format(status_messages.get(result.returncode, "Uncommon Failure"))

    # For exercises with binary result : 0 or 100
    if not feedback_settings["has_feedback"]:
        feedback.set_global_feedback(msg, True)
        feedback_result(1.0 if result.returncode == 0 else 0.0, feedback_settings)
        return

    kind = feedback_settings["feedback_kind"]

    # JavaGrading
    if kind == "JavaGrading":
        score_ratio, msg = extract_java_grading_result(result, feedback_settings)
        # To prevent some genius to have success grade with a prohibited
        if result.returncode == 2:
            score_ratio = 0.0
        feedback_result(score_ratio, feedback_settings)
        feedback.set_global_feedback(msg, True)

    # JaCoCo
    elif kind == "JaCoCo":
        if result.returncode == 0:
            score_ratio, msg = extract_jacoco_result(feedback_settings)
            feedback_result(score_ratio, feedback_settings)
            message_index = 0 if score_ratio >= feedback_settings["quorum"] else 3
            msg2 = "{}\n".format(status_messages.get(message_index, "Uncommon Failure"))
            feedback.set_global_feedback(msg2, True)
            feedback.set_global_feedback(rst.get_codeblock("java", msg), True)
        else:
            feedback.set_global_feedback(msg, True)
            feedback_result(0.0, feedback_settings)


# Decision function to decide if the student pass the required level for this task
def feedback_result(score_ratio, feedback_settings):
    """Publish pass/fail status and the final grade (0-100) to INGInious."""
    passed = score_ratio >= feedback_settings["quorum"]
    feedback.set_global_result("success" if passed else "failed")
    # If coverage exercise, give him 100% if >= quorum else the basic score
    if passed and feedback_settings["feedback_kind"] == "JaCoCo":
        feedback.set_grade(1.0 * 100)
    else:
        feedback.set_grade(score_ratio * 100)


def handle_prohibited_statements(feedback_settings):
    """Fail the submission and exit when prohibited statements are detected.

    Delegates detection to helper.contains_prohibited_statement; on a hit,
    publishes the dedicated status message (code 2), a zero grade, and
    terminates the run script.
    """
    if not helper.contains_prohibited_statement(feedback_settings):
        return
    feedback.set_global_feedback(feedback_settings["status_message"].get(2, "Uncommon Failure"))
    feedback.set_global_result("failed")
    feedback.set_grade(0.0)
    sys.exit(0)
Loading

0 comments on commit d521735

Please sign in to comment.