diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 75ffe40..db4f79b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -15,7 +15,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        python-version: ["3.7", "3.8", "3.9", "3.10"]
+        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
         os: [ubuntu-latest, macos-latest, windows-latest]
     steps:
     - uses: actions/checkout@v2
@@ -27,7 +27,7 @@ jobs:
     - name: Check
       shell: bash
       run: |
-        python3 -m pip install --upgrade pip
+        python3 -m pip install --upgrade pip setuptools
        pip install -e .
        python3 -m unittest discover -v tests
        python3 setup.py install
diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index 4c35b61..271f9a1 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -2,6 +2,17 @@ CHANGELOG
 =========
 
+-------------------------------------------------------------------------------
+Sep 06, 2024 2.1.0
+-------------------------------------------------------------------------------
+Major
+- Remove support for Python 3.7 and add support for Python 3.11 and Python 3.12
+- Update CI test environment to drop Python 3.7 and add Python 3.11 and Python 3.12
+- Fix typos in docstrings for the fairness metrics get_score method
+- Update S3 link in evalrs LFM_DATASET_PATH
+- Add Probabilistic Fairness Metric calculation example in quick start
+- Add setuptools to the GitHub workflow since setuptools is no longer pre-installed in Python 3.12
+
 -------------------------------------------------------------------------------
 Jan 25, 2023 2.0.1
 -------------------------------------------------------------------------------
diff --git a/README.md b/README.md
index 501f135..8cb227a 100644
--- a/README.md
+++ b/README.md
@@ -185,7 +185,7 @@ print('F1 score is', f1_score.get_score(predictions, labels))
 
 ## Installation
 
-Jurity requires **Python 3.7+** and can be installed from PyPI using ``pip install jurity`` or by building from source as shown in [installation instructions](https://fidelity.github.io/jurity/install.html).
+Jurity requires **Python 3.8+** and can be installed from PyPI using ``pip install jurity`` or by building from source as shown in [installation instructions](https://fidelity.github.io/jurity/install.html).
 
 ## Citation
diff --git a/docs/_sources/install.rst.txt b/docs/_sources/install.rst.txt
index b5089bb..7c447fc 100644
--- a/docs/_sources/install.rst.txt
+++ b/docs/_sources/install.rst.txt
@@ -15,7 +15,7 @@ Installation
 Requirements
 ------------
 
-The library requires Python **3.6+** and depends on standard packages such as ``pandas, numpy``
+The library requires Python **3.8+** and depends on standard packages such as ``pandas, numpy``
 The ``requirements.txt`` lists the necessary packages.
 
 Install via pip
diff --git a/docs/install.html b/docs/install.html
index 0b26e2e..e23fc63 100644
--- a/docs/install.html
+++ b/docs/install.html
@@ -93,7 +93,7 @@

 Requirements
 
-The library requires Python 3.6+ and depends on standard packages such as pandas, numpy
+The library requires Python 3.8+ and depends on standard packages such as pandas, numpy
 The requirements.txt lists the necessary packages.
 
diff --git a/docs/quick.html b/docs/quick.html
index 2a8c027..ed9c740 100644
--- a/docs/quick.html
+++ b/docs/quick.html
@@ -109,6 +109,30 @@
 Calculate Fairness Metrics
 
+Calculate Probabilistic Fairness Metric
+
+# Import binary fairness metrics from Jurity
+from jurity.fairness import BinaryFairnessMetrics
+
+# Instead of 0/1 deterministic membership at individual level
+# consider likelihoods of membership to protected classes for each sample
+binary_predictions = [1, 1, 0, 1]
+memberships = [[0.2, 0.8], [0.4, 0.6], [0.2, 0.8], [0.9, 0.1]]
+
+# Metric
+metric = BinaryFairnessMetrics.StatisticalParity()
+print("Binary Fairness score: ", metric.get_score(binary_predictions, memberships))
+
+# Surrogate membership: consider access to surrogate membership at the group level.
+surrogates = [0, 2, 0, 1]
+print("Binary Fairness score: ", metric.get_score(binary_predictions, memberships, surrogates))
+
Fit and Apply Bias Mitigation

# Import binary fairness metrics and mitigation
diff --git a/docsrc/install.rst b/docsrc/install.rst
index b5089bb..7c447fc 100644
--- a/docsrc/install.rst
+++ b/docsrc/install.rst
@@ -15,7 +15,7 @@ Installation
 Requirements
 ------------
 
-The library requires Python **3.6+** and depends on standard packages such as ``pandas, numpy``
+The library requires Python **3.8+** and depends on standard packages such as ``pandas, numpy``
 The ``requirements.txt`` lists the necessary packages. 
 
 Install via pip
diff --git a/evalrs/evaluation/utils.py b/evalrs/evaluation/utils.py
index 1101c91..b99dda9 100644
--- a/evalrs/evaluation/utils.py
+++ b/evalrs/evaluation/utils.py
@@ -17,7 +17,7 @@
 from datetime import datetime
 
 
-LFM_DATASET_PATH="https://cikm-evalrs-dataset.s3.us-west-2.amazonaws.com/evalrs_dataset.zip"
+LFM_DATASET_PATH="https://evarl-2022-public-dataset.s3.us-east-1.amazonaws.com/evalrs_dataset.zip"
 
 TOP_K_CHALLENGE = 100
 LEADERBOARD_TESTS = [
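
The updated LFM_DATASET_PATH above points to a new public S3 bucket. A minimal sketch of how the archive could be fetched and unpacked with only the standard library; the fetch_lfm_dataset helper is illustrative and not part of evalrs:

    import urllib.request
    import zipfile
    from pathlib import Path

    LFM_DATASET_PATH = "https://evarl-2022-public-dataset.s3.us-east-1.amazonaws.com/evalrs_dataset.zip"

    def fetch_lfm_dataset(target_dir: str = "evalrs_dataset") -> Path:
        """Download and extract the EvalRS dataset archive if it is not already present."""
        target = Path(target_dir)
        archive = target.with_suffix(".zip")
        if not target.exists():
            urllib.request.urlretrieve(LFM_DATASET_PATH, archive)
            with zipfile.ZipFile(archive) as zf:
                zf.extractall(target)
        return target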
diff --git a/jurity/_version.py b/jurity/_version.py
index 3ff5aea..35bb942 100644
--- a/jurity/_version.py
+++ b/jurity/_version.py
@@ -2,4 +2,4 @@
 # Copyright FMR LLC 
 # SPDX-License-Identifier: Apache-2.0
 
-__version__ = "2.0.1"
\ No newline at end of file
+__version__ = "2.1.0"
\ No newline at end of file
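
The version bump can be confirmed at runtime from the module touched above; a small check using the jurity._version path exactly as it appears in this diff:

    from jurity._version import __version__

    # After upgrading, this is expected to print 2.1.0
    print("jurity version:", __version__)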
diff --git a/jurity/fairness/average_odds.py b/jurity/fairness/average_odds.py
index 76aa0a9..02408e1 100644
--- a/jurity/fairness/average_odds.py
+++ b/jurity/fairness/average_odds.py
@@ -47,11 +47,10 @@ def get_score(labels: Union[List, np.ndarray, pd.Series],
 
         Parameters
         ----------
-        labels: labels: Union[List, np.ndarray, pd.Series]
-        Ground truth labels for each row (0/1).
+        labels: Union[List, np.ndarray, pd.Series]
+            Ground truth labels for each row (0/1).
         predictions: Union[List, np.ndarray, pd.Series]
-            Binary predictions from some black-box classifier (0/1).
-            Binary prediction for each sample from a binary (0/1) lack-box classifier.
+            Binary prediction for each sample from a binary (0/1) black-box classifier.
         memberships: Union[List, np.ndarray, pd.Series, List[List], pd.DataFrame],
             Membership attribute for each sample.
                 If deterministic, it is a binary label for each sample [0, 1, 0, .., 1]
diff --git a/jurity/fairness/equal_opportunity.py b/jurity/fairness/equal_opportunity.py
index dc92602..b2e21b9 100644
--- a/jurity/fairness/equal_opportunity.py
+++ b/jurity/fairness/equal_opportunity.py
@@ -39,11 +39,10 @@ def get_score(labels: Union[List, np.ndarray, pd.Series],
 
         Parameters
         ----------
-        labels: labels: Union[List, np.ndarray, pd.Series]
-        Ground truth labels for each row (0/1).
+        labels: Union[List, np.ndarray, pd.Series]
+            Ground truth labels for each row (0/1).
         predictions: Union[List, np.ndarray, pd.Series]
-            Binary predictions from some black-box classifier (0/1).
-            Binary prediction for each sample from a binary (0/1) lack-box classifier.
+            Binary prediction for each sample from a binary (0/1) black-box classifier.
         memberships: Union[List, np.ndarray, pd.Series, List[List], pd.DataFrame],
             Membership attribute for each sample.
                 If deterministic, it is a binary label for each sample [0, 1, 0, .., 1]
diff --git a/jurity/fairness/fnr_difference.py b/jurity/fairness/fnr_difference.py
index 822902c..c5d75d9 100644
--- a/jurity/fairness/fnr_difference.py
+++ b/jurity/fairness/fnr_difference.py
@@ -44,11 +44,10 @@ def get_score(labels: Union[List, np.ndarray, pd.Series],
 
         Parameters
         ----------
-        labels: labels: Union[List, np.ndarray, pd.Series]
-        Ground truth labels for each row (0/1).
+        labels: Union[List, np.ndarray, pd.Series]
+            Ground truth labels for each row (0/1).
         predictions: Union[List, np.ndarray, pd.Series]
-            Binary predictions from some black-box classifier (0/1).
-            Binary prediction for each sample from a binary (0/1) lack-box classifier.
+            Binary prediction for each sample from a binary (0/1) black-box classifier.
         memberships: Union[List, np.ndarray, pd.Series, List[List], pd.DataFrame],
             Membership attribute for each sample.
                 If deterministic, it is a binary label for each sample [0, 1, 0, .., 1]
diff --git a/jurity/fairness/predictive_equality.py b/jurity/fairness/predictive_equality.py
index 73852ee..68af889 100644
--- a/jurity/fairness/predictive_equality.py
+++ b/jurity/fairness/predictive_equality.py
@@ -48,11 +48,10 @@ def get_score(labels: Union[List, np.ndarray, pd.Series],
 
         Parameters
         ----------
-        labels: labels: Union[List, np.ndarray, pd.Series]
-        Ground truth labels for each row (0/1).
+        labels: Union[List, np.ndarray, pd.Series]
+            Ground truth labels for each row (0/1).
         predictions: Union[List, np.ndarray, pd.Series]
-            Binary predictions from some black-box classifier (0/1).
-            Binary prediction for each sample from a binary (0/1) lack-box classifier.
+            Binary prediction for each sample from a binary (0/1) black-box classifier.
         memberships: Union[List, np.ndarray, pd.Series, List[List], pd.DataFrame],
             Membership attribute for each sample.
                 If deterministic, it is a binary label for each sample [0, 1, 0, .., 1]
diff --git a/jurity/fairness/statistical_parity.py b/jurity/fairness/statistical_parity.py
index fa3157a..256e6f3 100644
--- a/jurity/fairness/statistical_parity.py
+++ b/jurity/fairness/statistical_parity.py
@@ -40,8 +40,7 @@ def get_score(predictions: Union[List, np.ndarray, pd.Series],
         Parameters
         ----------
         predictions: Union[List, np.ndarray, pd.Series]
-            Binary predictions from some black-box classifier (0/1).
-            Binary prediction for each sample from a binary (0/1) lack-box classifier.
+            Binary prediction for each sample from a binary (0/1) black-box classifier.
         memberships: Union[List, np.ndarray, pd.Series, List[List], pd.DataFrame],
             Membership attribute for each sample.
                 If deterministic, it is a binary label for each sample [0, 1, 0, .., 1]
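
Statistical parity, unlike the metrics above, needs no ground-truth labels, as the quick-start snippet shows. For a side-by-side view of all binary fairness metrics, a sketch assuming the get_all_scores convenience method from earlier Jurity releases is still available with the same positional arguments:

    from jurity.fairness import BinaryFairnessMetrics

    labels = [0, 1, 1, 0, 1, 1]
    predictions = [0, 1, 0, 0, 1, 1]
    memberships = [0, 0, 0, 1, 1, 1]

    # Expected to return a DataFrame with one row per fairness metric
    print(BinaryFairnessMetrics.get_all_scores(labels, predictions, memberships))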
diff --git a/setup.py b/setup.py
index 003ffa9..1c632f5 100644
--- a/setup.py
+++ b/setup.py
@@ -26,10 +26,10 @@
     packages=setuptools.find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
     classifiers=[
         "License :: OSI Approved :: Apache Software License",
-        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.8",
         "Operating System :: OS Independent",
     ],
     project_urls={"Source": "https://github.com/fidelity/jurity"},
     install_requires=required,
-    python_requires=">=3.6"
+    python_requires=">=3.8"
 )
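
The raised python_requires floor can also be mirrored by a runtime guard; an illustrative check, not part of the package:

    import sys

    # Mirror the python_requires=">=3.8" constraint from setup.py
    if sys.version_info < (3, 8):
        raise RuntimeError("jurity 2.1.0 requires Python 3.8 or newer")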