Skip to content

Commit

Permalink
removing legacy code
Browse files Browse the repository at this point in the history
  • Loading branch information
alex-rakowski committed Jun 26, 2024
1 parent 8a5e4b7 commit c624f36
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 33 deletions.
37 changes: 5 additions & 32 deletions dask_snowflake/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,7 @@

import dask
import dask.dataframe as dd
from dask.base import tokenize
from dask.dataframe.core import new_dd_object
from dask.delayed import delayed
from dask.highlevelgraph import HighLevelGraph
from dask.layers import DataFrameIOLayer
from dask.utils import parse_bytes


Expand Down Expand Up @@ -299,31 +295,8 @@ def read_snowflake(
batches, meta, npartitions=npartitions, partition_size=partition_size
)

# Legacy check
# if the dask version is new enough, we use from_map
if hasattr(dd, "from_map"):
return dd.from_map(
partial(_fetch_batches, arrow_options=arrow_options),
batches_partitioned,
meta=meta,
)
# if not, revert to the existing method
else:
label = "read-snowflake-"
output_name = label + tokenize(
query,
connection_kwargs,
arrow_options,
)
divisions = tuple([None] * (len(batches_partitioned) + 1))
# Create Blockwise layer
layer = DataFrameIOLayer(
output_name,
meta.columns,
batches_partitioned,
# TODO: Implement wrapper to only convert columns requested
partial(_fetch_batches, arrow_options=arrow_options),
label=label,
)
graph = HighLevelGraph({output_name: layer}, {output_name: set()})
return new_dd_object(graph, output_name, meta, divisions)
return dd.from_map(
partial(_fetch_batches, arrow_options=arrow_options),
batches_partitioned,
meta=meta,
)
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
dask>=2021.05.0
dask>=2024.3.0
distributed
snowflake-connector-python[pandas]>=2.6.0
snowflake-sqlalchemy
Expand Down

0 comments on commit c624f36

Please sign in to comment.