Support Parquet files in ShardedDataSource #764

Merged 5 commits on Oct 14, 2024
30 changes: 29 additions & 1 deletion src/levanter/data/sharded_datasource.py
@@ -20,6 +20,7 @@
import datasets
import fsspec
import numpy as np
import pyarrow.parquet as pq

from levanter.utils import fsspec_utils

@@ -149,6 +150,10 @@ def datasource_from_json(urls_or_paths: Sequence[str]) -> ShardedDataSource[dict]:
return JsonDataSource(urls_or_paths)


def datasource_from_parquet(urls_or_paths: Sequence[str]) -> ShardedDataSource[dict]:
return ParquetDataSource(urls_or_paths)


class WrappedHFDataSource(ShardedDataSource[dict]):
"""
This class is responsible for loading a dataset from HuggingFace Datasets and returning the shards.
@@ -238,6 +243,11 @@ def open_shard_at_row(self, shard_name: str, row: int) -> Iterator[str]:
data = json.load(f)
for doc in data[row:]:
yield doc[self.text_key]
case ".parquet":
table = pq.read_table(f)

Member:
read_table is expensive in the general case; it would be better to look at the metadata to figure out which row group to start on and then use read_row_group, I think.

Contributor (Author):
Thanks for suggesting this, incorporated this in #766
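
For illustration, a minimal sketch of the reviewer's row-group approach (assuming pyarrow's ParquetFile API; the helper is hypothetical and is not necessarily the code that landed in #766):

```python
import pyarrow.parquet as pq

def iter_text_from_row(f, row: int, text_key: str):
    """Yield text starting at `row`, reading one row group at a time."""
    pf = pq.ParquetFile(f)
    rows_seen = 0
    for rg in range(pf.metadata.num_row_groups):
        rg_rows = pf.metadata.row_group(rg).num_rows
        if rows_seen + rg_rows <= row:
            rows_seen += rg_rows  # this row group ends before `row`; skip it
            continue
        table = pf.read_row_group(rg)
        # trim leading rows only in the first row group we actually read
        for record in table.slice(max(row - rows_seen, 0)).to_pylist():
            yield record[text_key]
        rows_seen += rg_rows
```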

sliced_table = table.slice(row)
for record in sliced_table.to_pylist():
yield record[self.text_key] # assumes text_key is in record
case _:
raise ValueError(f"Unknown format {format}")

@@ -313,7 +323,7 @@ def open_shard_at_row(self, shard_name: str, row: int) -> Iterator[Tuple[np.ndar


def _sniff_format_for_dataset(url):
-good_formats = [".jsonl", ".txt", ".json"]
+good_formats = [".jsonl", ".txt", ".json", ".parquet"]
format_from_url = None
# try both with and without compression (could be gz, bz2, etc, so look at the "first" extension)
extensions = [os.path.splitext(url)[1], os.path.splitext(os.path.splitext(url)[0])[1]]
@@ -417,6 +427,24 @@ def open_shard_at_row(self, shard_name: str, row: int) -> Iterator[dict]:
return iter(data[row:])


class ParquetDataSource(ShardedDataSource[dict]):

Member:
ideally the TextUrlDataSource would also work with parquet files. That is what we use for training configs typically

Contributor (Author):
Makes sense, I added a new test for this in #766
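
For context, a hypothetical sketch of what such a test could look like (it assumes TextUrlDataSource takes a list of urls plus a text_key, as the use of self.text_key above suggests; this is not necessarily the test added in #766):

```python
import os
import tempfile

import pyarrow as pa
import pyarrow.parquet as pq

from levanter.data.sharded_datasource import TextUrlDataSource

def test_text_url_datasource_on_parquet():
    with tempfile.NamedTemporaryFile(suffix=".parquet") as f:
        pq.write_table(pa.table({"text": ["hello", "world"]}), f.name)
        source = TextUrlDataSource([os.path.abspath(f.name)], text_key="text")
        # starting at row 1 should skip "hello" and yield only "world"
        assert list(source.open_shard_at_row(source.shard_names[0], row=1)) == ["world"]
```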

def __init__(self, urls):
self.urls = urls
self._shard_name_to_url_mapping = _mk_shard_name_mapping(urls)

@property
def shard_names(self) -> Sequence[str]:
return list(self._shard_name_to_url_mapping.keys())

def open_shard_at_row(self, shard_name: str, row: int) -> Iterator[dict]:
url = self._shard_name_to_url_mapping[shard_name]
with fsspec.open(url, "rb", compression="infer") as f:
table = pq.read_table(f)
sliced_table = table.slice(row) # zero-copy slicing
for record in sliced_table.to_pylist():
yield record
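
For reference, a minimal usage sketch (the path is hypothetical):

```python
source = ParquetDataSource(["data/shard-000.parquet"])  # hypothetical path
for shard in source.shard_names:
    # each record is a plain dict keyed by column name
    for record in source.open_shard_at_row(shard, row=0):
        print(record)
```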


def _mk_shard_name_mapping(urls):
_shard_name_to_url_mapping = {}
# remove common prefix
43 changes: 42 additions & 1 deletion tests/test_sharded_dataset.py
@@ -1,6 +1,7 @@
import os
import tempfile

-from levanter.data.sharded_datasource import AudioTextUrlDataSource, _sniff_format_for_dataset
+from levanter.data.sharded_datasource import AudioTextUrlDataSource, ParquetDataSource, _sniff_format_for_dataset
from test_utils import skip_if_no_soundlibs


@@ -24,6 +25,46 @@ def test_sniff_format_for_json():
assert _sniff_format_for_dataset(f.name) == ".json"


def test_sniff_format_for_parquet():

import pyarrow as pa
import pyarrow.parquet as pq

with tempfile.NamedTemporaryFile(suffix=".parquet") as f:
table = pa.table({"col1": [1, 2, 3], "col2": ["a", "b", "c"]})
pq.write_table(table, f.name)
f.flush()

assert _sniff_format_for_dataset(f.name) == ".parquet"


@skip_if_no_soundlibs
def test_resolve_audio_pointer():
AudioTextUrlDataSource.resolve_audio_pointer("https://ccrma.stanford.edu/~jos/mp3/trumpet.mp3", 16_000)


def test_basic_parquet_datasource_read_row():

import pyarrow as pa
import pyarrow.parquet as pq

with tempfile.NamedTemporaryFile(suffix=".parquet", delete=True) as f:
# Create a simple dataset
data = {"column1": ["value1", "value2", "value3"], "column2": [10, 20, 30]}
table = pa.Table.from_pydict(data)
pq.write_table(table, f.name)

datasource = ParquetDataSource([os.path.abspath(f.name)])

assert len(datasource.shard_names) == 1, "Expected only one shard"
shard_name = datasource.shard_names[0]

# sanity check: Read data starting from row 1
row_data = list(datasource.open_shard_at_row(shard_name=shard_name, row=1))

# Verify the output
assert len(row_data) == 2 # We expect 2 rows starting from index 1
assert row_data[0]["column1"] == "value2"
assert row_data[0]["column2"] == 20
assert row_data[1]["column1"] == "value3"
assert row_data[1]["column2"] == 30