The library requires Python 3.8+ and depends on standard packages such as pandas, numpy.
The requirements.txt lists the necessary packages.
+# Import binary fairness metrics from Jurity
+from jurity.fairness import BinaryFairnessMetrics
+
+# Instead of 0/1 deterministic membership at individual level
+# consider likelihoods of membership to protected classes for each sample
+binary_predictions = [1, 1, 0, 1]
+memberships = [[0.2, 0.8], [0.4, 0.6], [0.2, 0.8], [0.9, 0.1]]
+
+# Metric
+metric = BinaryFairnessMetrics.StatisticalParity()
+print("Binary Fairness score: ", metric.get_score(binary_predictions, memberships))
+
+# Surrogate membership: consider access to surrogate membership at the group level.
+surrogates = [0, 2, 0, 1]
+print("Binary Fairness score: ", metric.get_score(binary_predictions, memberships, surrogates))
+
+ # Import binary fairness metrics and mitigation
diff --git a/docsrc/install.rst b/docsrc/install.rst
index b5089bb..7c447fc 100644
--- a/docsrc/install.rst
+++ b/docsrc/install.rst
@@ -15,7 +15,7 @@ Installation
Requirements
------------
-The library requires Python **3.6+** and depends on standard packages such as ``pandas, numpy``
+The library requires Python **3.8+** and depends on standard packages such as ``pandas, numpy``
The ``requirements.txt`` lists the necessary packages.
Install via pip
diff --git a/evalrs/evaluation/utils.py b/evalrs/evaluation/utils.py
index 1101c91..b99dda9 100644
--- a/evalrs/evaluation/utils.py
+++ b/evalrs/evaluation/utils.py
@@ -17,7 +17,7 @@
from datetime import datetime
-LFM_DATASET_PATH="https://cikm-evalrs-dataset.s3.us-west-2.amazonaws.com/evalrs_dataset.zip"
+LFM_DATASET_PATH="https://evarl-2022-public-dataset.s3.us-east-1.amazonaws.com/evalrs_dataset.zip"
TOP_K_CHALLENGE = 100
LEADERBOARD_TESTS = [
diff --git a/jurity/_version.py b/jurity/_version.py
index 3ff5aea..35bb942 100644
--- a/jurity/_version.py
+++ b/jurity/_version.py
@@ -2,4 +2,4 @@
# Copyright FMR LLC
# SPDX-License-Identifier: Apache-2.0
-__version__ = "2.0.1"
\ No newline at end of file
+__version__ = "2.1.0"
\ No newline at end of file
diff --git a/jurity/fairness/average_odds.py b/jurity/fairness/average_odds.py
index 76aa0a9..02408e1 100644
--- a/jurity/fairness/average_odds.py
+++ b/jurity/fairness/average_odds.py
@@ -47,11 +47,10 @@ def get_score(labels: Union[List, np.ndarray, pd.Series],
Parameters
----------
- labels: labels: Union[List, np.ndarray, pd.Series]
- Ground truth labels for each row (0/1).
+ labels: Union[List, np.ndarray, pd.Series]
+ Ground truth labels for each row (0/1).
predictions: Union[List, np.ndarray, pd.Series]
- Binary predictions from some black-box classifier (0/1).
- Binary prediction for each sample from a binary (0/1) lack-box classifier.
+ Binary prediction for each sample from a binary (0/1) black-box classifier.
memberships: Union[List, np.ndarray, pd.Series, List[List], pd.DataFrame],
Membership attribute for each sample.
If deterministic, it is a binary label for each sample [0, 1, 0, .., 1]
diff --git a/jurity/fairness/equal_opportunity.py b/jurity/fairness/equal_opportunity.py
index dc92602..b2e21b9 100644
--- a/jurity/fairness/equal_opportunity.py
+++ b/jurity/fairness/equal_opportunity.py
@@ -39,11 +39,10 @@ def get_score(labels: Union[List, np.ndarray, pd.Series],
Parameters
----------
- labels: labels: Union[List, np.ndarray, pd.Series]
- Ground truth labels for each row (0/1).
+ labels: Union[List, np.ndarray, pd.Series]
+ Ground truth labels for each row (0/1).
predictions: Union[List, np.ndarray, pd.Series]
- Binary predictions from some black-box classifier (0/1).
- Binary prediction for each sample from a binary (0/1) lack-box classifier.
+ Binary prediction for each sample from a binary (0/1) black-box classifier.
memberships: Union[List, np.ndarray, pd.Series, List[List], pd.DataFrame],
Membership attribute for each sample.
If deterministic, it is a binary label for each sample [0, 1, 0, .., 1]
diff --git a/jurity/fairness/fnr_difference.py b/jurity/fairness/fnr_difference.py
index 822902c..c5d75d9 100644
--- a/jurity/fairness/fnr_difference.py
+++ b/jurity/fairness/fnr_difference.py
@@ -44,11 +44,10 @@ def get_score(labels: Union[List, np.ndarray, pd.Series],
Parameters
----------
- labels: labels: Union[List, np.ndarray, pd.Series]
- Ground truth labels for each row (0/1).
+ labels: Union[List, np.ndarray, pd.Series]
+ Ground truth labels for each row (0/1).
predictions: Union[List, np.ndarray, pd.Series]
- Binary predictions from some black-box classifier (0/1).
- Binary prediction for each sample from a binary (0/1) lack-box classifier.
+ Binary prediction for each sample from a binary (0/1) black-box classifier.
memberships: Union[List, np.ndarray, pd.Series, List[List], pd.DataFrame],
Membership attribute for each sample.
If deterministic, it is a binary label for each sample [0, 1, 0, .., 1]
diff --git a/jurity/fairness/predictive_equality.py b/jurity/fairness/predictive_equality.py
index 73852ee..68af889 100644
--- a/jurity/fairness/predictive_equality.py
+++ b/jurity/fairness/predictive_equality.py
@@ -48,11 +48,10 @@ def get_score(labels: Union[List, np.ndarray, pd.Series],
Parameters
----------
- labels: labels: Union[List, np.ndarray, pd.Series]
- Ground truth labels for each row (0/1).
+ labels: Union[List, np.ndarray, pd.Series]
+ Ground truth labels for each row (0/1).
predictions: Union[List, np.ndarray, pd.Series]
- Binary predictions from some black-box classifier (0/1).
- Binary prediction for each sample from a binary (0/1) lack-box classifier.
+ Binary prediction for each sample from a binary (0/1) black-box classifier.
memberships: Union[List, np.ndarray, pd.Series, List[List], pd.DataFrame],
Membership attribute for each sample.
If deterministic, it is a binary label for each sample [0, 1, 0, .., 1]
diff --git a/jurity/fairness/statistical_parity.py b/jurity/fairness/statistical_parity.py
index fa3157a..256e6f3 100644
--- a/jurity/fairness/statistical_parity.py
+++ b/jurity/fairness/statistical_parity.py
@@ -40,8 +40,7 @@ def get_score(predictions: Union[List, np.ndarray, pd.Series],
Parameters
----------
predictions: Union[List, np.ndarray, pd.Series]
- Binary predictions from some black-box classifier (0/1).
- Binary prediction for each sample from a binary (0/1) lack-box classifier.
+ Binary prediction for each sample from a binary (0/1) black-box classifier.
memberships: Union[List, np.ndarray, pd.Series, List[List], pd.DataFrame],
Membership attribute for each sample.
If deterministic, it is a binary label for each sample [0, 1, 0, .., 1]
diff --git a/setup.py b/setup.py
index 003ffa9..1c632f5 100644
--- a/setup.py
+++ b/setup.py
@@ -26,10 +26,10 @@
packages=setuptools.find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
classifiers=[
"License :: OSI Approved :: Apache Software License",
- "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
],
project_urls={"Source": "https://github.com/fidelity/jurity"},
install_requires=required,
- python_requires=">=3.6"
+ python_requires=">=3.8"
)