Skip to content

Commit

Permalink
Merge pull request #196 from IFB-ElixirFr/new-terms
Browse files Browse the repository at this point in the history
v1.2.3 new terms button
  • Loading branch information
albangaignard authored Jun 1, 2023
2 parents 2e51e82 + 6620606 commit 71146c5
Show file tree
Hide file tree
Showing 20 changed files with 1,135 additions and 269 deletions.
4 changes: 1 addition & 3 deletions app.py
Original file line number Diff line number Diff line change
Expand Up @@ -814,8 +814,7 @@ def evaluate_fairmetrics(json, metric_name, client_metric_id, url):


def evaluate_fc_metrics(metric_name, client_metric_id, url):
# print("OK FC Metrics")
# print(cache.get("TOTO"))
# print(metric_name)
# print(METRICS_CUSTOM)

dev_logger.info("Evaluating FAIR-Checker metric")
Expand Down Expand Up @@ -880,7 +879,6 @@ def evaluate_fc_metrics(metric_name, client_metric_id, url):
# "name": name,
}
emit("done_" + client_metric_id, emit_json)
dev_logger.info("DONE our own metric !")


@socketio.on("quick_structured_data_search")
Expand Down
77 changes: 77 additions & 0 deletions metrics/A12_Impl.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
from metrics.AbstractFAIRMetrics import AbstractFAIRMetrics
import validators
from metrics.recommendation import json_rec


class A12_Impl(AbstractFAIRMetrics):
    """
    FAIR principle A1.2 ("The protocol allows for an authentication and
    authorisation procedure, where necessary").

    FAIR-Checker assessment: look for access-rights / licensing metadata
    in the resource's RDF graph.
    """

    def __init__(self, web_resource=None):
        super().__init__(web_resource)
        self.name = "Authorisation procedure or access rights"
        self.id = "16"
        self.principle = "https://w3id.org/fair/principles/terms/A1.2"
        self.principle_tag = "A1.2"
        self.implem = "FAIR-Checker"
        # NOTE: keep this description in sync with checked_properties
        # in strong_evaluate() below.
        self.desc = """
        The protocol allows for an authentication and authorisation procedure where necessary. <br>
        FAIR-Checker verifies if access rights are specified in metadata through terms
        odrl:hasPolicy, dct:rights, dct:accessRights, dct:license, or schema:license.
        """

    def weak_evaluate(self):
        """Weak assessment: no automated check, return an empty evaluation."""
        evaluation = self.get_evaluation()
        evaluation.set_implem(self.implem)
        evaluation.set_metrics(self.principle_tag)
        return evaluation

    def strong_evaluate(self):
        """
        Strong assessment: run a SPARQL ASK on the resource's RDF graph for
        at least one access-policy property; score 2 on success, 0 otherwise.
        """
        evaluation = self.get_evaluation()
        evaluation.set_implem(self.implem)
        evaluation.set_metrics(self.principle_tag)

        # Properties considered as expressing an access policy or license.
        checked_properties = """
            odrl:hasPolicy
            dct:rights
            dct:accessRights
            dct:license
            schema:license
        """
        query_prov = (
            self.COMMON_SPARQL_PREFIX
            + """
            ASK {
                VALUES ?p { """
            + checked_properties
            + """ } .
                ?s ?p ?o .
            }
            """
        )

        evaluation.log_info(
            "Checking that at least one of the access policy properties is found in metadata:\n"
            + checked_properties
        )
        res = self.get_web_resource().get_rdf().query(query_prov)
        for bool_res in res:
            # An ASK query yields exactly one boolean result.
            if bool_res:
                evaluation.log_info(
                    "At least one of the access policy properties was found in metadata !"
                )
                evaluation.set_score(2)
            else:
                evaluation.log_info(
                    "None of the access policy properties were found in metadata !"
                )
                evaluation.set_recommendations(
                    json_rec["A12"]["reco1"] + checked_properties
                )
                evaluation.set_score(0)
            return evaluation

        # Defensive fallback: an ASK result should never be empty, but never
        # return None to callers.
        evaluation.set_score(0)
        return evaluation
6 changes: 3 additions & 3 deletions metrics/AbstractFAIRMetrics.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,14 @@
from abc import ABC, abstractmethod
import logging
import sys
from io import StringIO
from metrics.Evaluation import Result, Evaluation
from metrics.Evaluation import Evaluation


class AbstractFAIRMetrics(ABC):

COMMON_SPARQL_PREFIX = """
PREFIX schema: <http://schema.org/>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX dcat: <http://www.w3.org/ns/dcat#>
PREFIX doap: <http://usefulinc.com/ns/doap#>
PREFIX dbpedia-owl: <http://dbpedia.org/ontology/>
PREFIX cc: <http://creativecommons.org/ns#>
Expand All @@ -19,6 +18,7 @@ class AbstractFAIRMetrics(ABC):
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX pav: <http://purl.org/pav/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX odrl: <http://www.w3.org/ns/odrl/2/>
"""

cache = {}
Expand Down
14 changes: 1 addition & 13 deletions metrics/F1A_Impl.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,7 @@
import logging
import json

import requests
from pathlib import Path
from urllib.parse import urlparse
import logging
from rdflib import URIRef

from metrics.AbstractFAIRMetrics import AbstractFAIRMetrics
from datetime import timedelta
from metrics.FairCheckerExceptions import FairCheckerException
from metrics.Evaluation import Evaluation
import validators
import re
from metrics.recommendation import json_rec


Expand All @@ -31,8 +20,7 @@ def __init__(self, web_resource=None):
self.principle_tag = "F1A"
self.implem = "FAIR-Checker"
self.desc = """
FAIRChecker check that the resource identifier is an URL that can be reach, meaning it is unique, it is even
better if the URL refer to a DOI.
FAIRChecker checks that the resource identifier is a reachable URL. It's better if the URL is persistent (WebID, PURL or DOI).
"""

def weak_evaluate(self, eval=None) -> Evaluation:
Expand Down
73 changes: 64 additions & 9 deletions metrics/F2A_Impl.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,17 +28,12 @@ def __init__(self, web_resource=None):
self.principle_tag = "F2A"
self.implem = "FAIR-Checker"
self.desc = """
FAIR-Checker verifies that at least one RDF triple can be found in metadata.
For weak assessment, FAIR-Checker verifies that at least one RDF triple can be found in metadata.
For strong assessment, it searches for at least one property in dct:title dct:description dct:accessURL
dct:downloadURL dcat:endpointURL dcat:endpointDescription.
"""

def weak_evaluate(self, eval=None) -> Evaluation:
if not eval:
eval = self.get_evaluation()
eval.set_implem(self.implem)
eval.set_metrics(self.principle_tag)
return eval

def strong_evaluate(self, eval=None) -> Evaluation:
"""
at least one embedded RDF triple
"""
Expand All @@ -59,11 +54,71 @@ def strong_evaluate(self, eval=None) -> Evaluation:
+ " RDF triples were found, thus data is in a well structured graph format"
)
# print(len(kg))
eval.set_score(2)
eval.set_score(1)
return eval
eval.log_info(
"No RDF triples found, thus data is probably not structured as needed"
)
eval.set_recommendations(json_rec["F2A"]["reco1"])
eval.set_score(0)
return eval

def strong_evaluate(self, eval=None) -> Evaluation:
    """
    Strong assessment for F2A: run a SPARQL ASK on the resource's RDF graph
    for at least one discoverability property (title, description,
    access/download URL, endpoint description/URL).
    Scores 2 when one is found, 0 otherwise.

    :param eval: optional Evaluation to reuse (e.g. when another metric such
        as I1 delegates to F2A); a fresh one is created when omitted.
        NOTE: the parameter name shadows the builtin but is part of the
        public keyword interface (callers pass eval=...), so it is kept.
    """
    if not eval:
        eval = self.get_evaluation()
    eval.set_implem(self.implem)
    eval.set_metrics(self.principle_tag)

    # Properties that make a dataset findable / discoverable.
    checked_properties = """
        dct:title
        dct:description
        dcat:accessURL
        dcat:downloadURL
        dcat:endpointDescription
        dcat:endpointURL
    """
    query_prov = (
        self.COMMON_SPARQL_PREFIX
        + """
        ASK {
            VALUES ?p { """
        + checked_properties
        + """ } .
            ?s ?p ?o .
        }
        """
    )

    # Fixed copy-paste from A1.2: these are discoverability properties,
    # not access-policy properties.
    eval.log_info(
        "Checking that at least one of the discoverability properties is found in metadata:\n"
        + checked_properties
    )
    res = self.get_web_resource().get_rdf().query(query_prov)
    for bool_res in res:
        # An ASK query yields exactly one boolean result.
        if bool_res:
            eval.log_info(
                "At least one of the discoverability properties was found in metadata !"
            )
            eval.set_score(2)
        else:
            eval.log_info(
                "None of the discoverability properties were found in metadata !"
            )
            eval.set_recommendations(
                json_rec["F2A"]["reco2"] + checked_properties
            )
            eval.set_score(0)
        return eval

    # Defensive fallback (debug prints removed): an ASK result should never
    # be empty, but never return None to callers.
    eval.log_info("Unexpected empty SPARQL ASK result")
    eval.set_score(0)
    return eval
10 changes: 10 additions & 0 deletions metrics/FAIRMetricsFactory.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
from metrics.F2A_Impl import F2A_Impl
from metrics.F2B_Impl import F2B_Impl
from metrics.A11_Impl import A11_Impl
from metrics.A12_Impl import A12_Impl
from metrics.I1_Impl import I1_Impl
from metrics.I1A_Impl import I1A_Impl
from metrics.I1B_Impl import I1B_Impl
Expand Down Expand Up @@ -42,6 +43,7 @@ def get_FC_metrics():
F2A_Impl().get_name(): F2A_Impl(),
F2B_Impl().get_name(): F2B_Impl(),
A11_Impl().get_name(): A11_Impl(),
A12_Impl().get_name(): A12_Impl(),
I1_Impl().get_name(): I1_Impl(),
I2_Impl().get_name(): I2_Impl(),
I3_Impl().get_name(): I3_Impl(),
Expand All @@ -59,6 +61,7 @@ def get_FC_impl(web_resource=None):
F2A_Impl(web_resource),
F2B_Impl(web_resource),
A11_Impl(web_resource),
A12_Impl(web_resource),
I1_Impl(web_resource),
I2_Impl(web_resource),
I3_Impl(web_resource),
Expand Down Expand Up @@ -103,6 +106,13 @@ def get_A11(web_resource, impl=Implem.FAIR_CHECKER):
else:
return A11_Impl(web_resource)

@staticmethod
def get_A12(web_resource, impl=Implem.FAIR_CHECKER):
    """Build the A1.2 metric implementation for *web_resource*."""
    # Only the FAIR-Checker implementation of A1.2 exists so far.
    if impl == Implem.FAIR_METRICS_API:
        raise NotYetImplementedException
    return A12_Impl(web_resource)

@staticmethod
def get_I1(web_resource, impl=Implem.FAIR_CHECKER):
if impl == Implem.FAIR_METRICS_API:
Expand Down
4 changes: 2 additions & 2 deletions metrics/I1A_Impl.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ def __init__(self, web_resource=None):
self.principle_tag = "I1A"
self.implem = "FAIR-Checker"
self.desc = """
FAIR-Checker verifies that at least one RDF triple can be found in metadata.
For the strong assessment, FAIR-Checker verifies that at least one RDF triple can be found in metadata.
"""

def weak_evaluate(self):
Expand All @@ -31,7 +31,7 @@ def weak_evaluate(self):

def strong_evaluate(self):
"""
Delegated to F2B
Delegated to F2A
"""
eval = self.get_evaluation()
eval.set_implem(self.implem)
Expand Down
8 changes: 4 additions & 4 deletions metrics/I1_Impl.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,8 @@ def weak_evaluate(self):
Delegated to F2A
"""
eval = self.get_evaluation()
eval.set_implem(self.implem)
eval.set_metrics(self.principle_tag)
# eval.set_implem(self.implem)
# eval.set_metrics(self.principle_tag)
eval_from_F2A = F2A_Impl(self.get_web_resource()).weak_evaluate(eval=eval)
return eval_from_F2A

Expand All @@ -37,7 +37,7 @@ def strong_evaluate(self):
Delegated to F2A
"""
eval = self.get_evaluation()
eval.set_implem(self.implem)
eval.set_metrics(self.principle_tag)
# eval.set_implem(self.implem)
# eval.set_metrics(self.principle_tag)
eval_from_F2A = F2A_Impl(self.get_web_resource()).strong_evaluate(eval=eval)
return eval_from_F2A
12 changes: 10 additions & 2 deletions metrics/R12_Impl.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,10 +40,13 @@ def strong_evaluate(self):
prov:wasAssociatedWith
prov:startedAtTime
prov:endedAtTime
dct:hasVersion
dct:hasVersion
dct:isVersionOf
dct:creator
dct:contributor
pav:hasVersion
dct:publisher
pav:hasVersion
pav:version
pav:hasCurrentVersion
pav:createdBy
pav:authoredBy
Expand All @@ -55,6 +58,11 @@ def strong_evaluate(self):
pav:curatedBy
pav:createdAt
pav:previousVersion
schema:creator
schema:author
schema:publisher
schema:provider
schema:funder
"""
query_prov = (
self.COMMON_SPARQL_PREFIX
Expand Down
Loading

0 comments on commit 71146c5

Please sign in to comment.