Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update pandas requirement from <2.2 to <2.3 #607

Merged
merged 3 commits on
Jan 30, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions docs/by-example/sqlalchemy/dataframe.rst
Original file line number Diff line number Diff line change
Expand Up @@ -76,8 +76,8 @@ The package provides a ``bulk_insert`` function to use the
workload across multiple batches, using a defined chunk size.

>>> import sqlalchemy as sa
>>> from pandas._testing import makeTimeDataFrame
>>> from crate.client.sqlalchemy.support import insert_bulk
>>> from pueblo.testing.pandas import makeTimeDataFrame
...
>>> # Define number of records, and chunk size.
>>> INSERT_RECORDS = 42
Expand Down Expand Up @@ -159,8 +159,8 @@ in a batched/chunked manner, using a defined chunk size, effectively using the
pandas implementation introduced in the previous section.

>>> import dask.dataframe as dd
>>> from pandas._testing import makeTimeDataFrame
>>> from crate.client.sqlalchemy.support import insert_bulk
>>> from pueblo.testing.pandas import makeTimeDataFrame
...
>>> # Define the number of records, the number of computing partitions,
>>> # and the chunk size of each database insert operation.
Expand Down
3 changes: 2 additions & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,8 @@ def read(path):
'dask',
'stopit>=1.1.2,<2',
'flake8>=4,<8',
'pandas<2.2',
'pandas<2.3',
'pueblo>=0.0.7',
'pytz',
],
doc=['sphinx>=3.5,<8',
Expand Down
2 changes: 1 addition & 1 deletion src/crate/client/sqlalchemy/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@

from .compat.api13 import monkeypatch_add_exec_driver_sql
from .dialect import CrateDialect
from .sa_version import SA_1_4, SA_VERSION
from .sa_version import SA_1_4, SA_2_0, SA_VERSION # noqa: F401


if SA_VERSION < SA_1_4:
Expand Down
10 changes: 5 additions & 5 deletions src/crate/client/sqlalchemy/tests/bulk_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
import sqlalchemy as sa
from sqlalchemy.orm import Session

from crate.client.sqlalchemy.sa_version import SA_VERSION, SA_2_0, SA_1_4
from crate.client.sqlalchemy.sa_version import SA_VERSION, SA_2_0

try:
from sqlalchemy.orm import declarative_base
Expand Down Expand Up @@ -170,14 +170,14 @@ def test_bulk_save_modern(self):
self.assertSequenceEqual(expected_bulk_args, bulk_args)

@skipIf(sys.version_info < (3, 8), "SQLAlchemy/pandas is not supported on Python <3.8")
@skipIf(SA_VERSION < SA_1_4, "SQLAlchemy 1.3 is not supported by pandas")
@skipIf(SA_VERSION < SA_2_0, "SQLAlchemy 1.4 is no longer supported by pandas 2.2")
@patch('crate.client.connection.Cursor', mock_cursor=FakeCursor)
def test_bulk_save_pandas(self, mock_cursor):
"""
Verify bulk INSERT with pandas.
"""
from pandas._testing import makeTimeDataFrame
from crate.client.sqlalchemy.support import insert_bulk
from pueblo.testing.pandas import makeTimeDataFrame

# 42 records / 8 chunksize = 5.25, which means 6 batches will be emitted.
INSERT_RECORDS = 42
Expand Down Expand Up @@ -209,15 +209,15 @@ def test_bulk_save_pandas(self, mock_cursor):
self.assertEqual(effective_op_count, OPCOUNT)

@skipIf(sys.version_info < (3, 8), "SQLAlchemy/Dask is not supported on Python <3.8")
@skipIf(SA_VERSION < SA_1_4, "SQLAlchemy 1.3 is not supported by pandas")
@skipIf(SA_VERSION < SA_2_0, "SQLAlchemy 1.4 is no longer supported by pandas 2.2")
@patch('crate.client.connection.Cursor', mock_cursor=FakeCursor)
def test_bulk_save_dask(self, mock_cursor):
"""
Verify bulk INSERT with Dask.
"""
import dask.dataframe as dd
from pandas._testing import makeTimeDataFrame
from crate.client.sqlalchemy.support import insert_bulk
from pueblo.testing.pandas import makeTimeDataFrame

# 42 records / 4 partitions means each partition has a size of 10.5 elements.
# Because the chunk size 8 is slightly smaller than 10, the partition will not
Expand Down
4 changes: 2 additions & 2 deletions src/crate/client/tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
crate_host, crate_path, crate_port, \
crate_transport_port, docs_path, localhost
from crate.client import connect
from .sqlalchemy import SA_VERSION, SA_1_4
from .sqlalchemy import SA_VERSION, SA_2_0

from .test_cursor import CursorTest
from .test_connection import ConnectionTest
Expand Down Expand Up @@ -395,7 +395,7 @@ def test_suite():
]

# Don't run DataFrame integration tests on SQLAlchemy < 2.0 and Python < 3.8.
skip_dataframe = SA_VERSION < SA_1_4 or sys.version_info < (3, 8)
skip_dataframe = SA_VERSION < SA_2_0 or sys.version_info < (3, 8)
if not skip_dataframe:
sqlalchemy_integration_tests += [
'docs/by-example/sqlalchemy/dataframe.rst',
Expand Down
Loading