Skip to content

Commit

Permalink
FEAT-modin-project#2451: More linting
Browse files Browse the repository at this point in the history
Signed-off-by: William Ma <[email protected]>
  • Loading branch information
williamma12 committed Feb 3, 2021
1 parent 780fb59 commit 0cc367e
Show file tree
Hide file tree
Showing 2 changed files with 14 additions and 7 deletions.
6 changes: 3 additions & 3 deletions modin/engines/base/io/text/csv_glob_dispatcher.py
Original file line number Diff line number Diff line change
Expand Up @@ -404,10 +404,10 @@ def partitioned_multiple_files(
split_size = 0

if nrows == 0:
break
break

if f.tell() == fsize:
continue
continue

DEBUG_START = f.tell()
file_splits, rows_read = cls.partitioned_file(
Expand Down Expand Up @@ -446,7 +446,7 @@ def partitioned_multiple_files(
final_result.append(file_splits)
else:
# Don't append anything if the file was too small for one partition.
if len(file_splits) > 1:
if len(file_splits) > 1:
print("+++")
final_result.append(file_splits[:-1])
split_result = [file_splits[-1]]
Expand Down
15 changes: 11 additions & 4 deletions modin/experimental/pandas/test/test_io_exp.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,7 @@ def TestReadGlobCSVFixture():

teardown_test_files(filenames)


@pytest.mark.usefixtures("TestReadGlobCSVFixture")
@pytest.mark.skipif(
Engine.get() != "Ray", reason="Currently only support Ray engine for glob paths."
Expand All @@ -99,12 +100,11 @@ def test_read_multiple_small_csv(self): # noqa: F811
# Glob does not guarantee ordering so we have to test both.
df_equals(modin_df, pandas_df)


@pytest.mark.parametrize("nrows", [35, 100])
def test_read_multiple_csv_nrows(self, request, nrows): # noqa: F811
pandas_df = pandas.concat([pandas.read_csv(fname) for fname in pytest.files])
pandas_df = pandas_df.iloc[:nrows, :]

modin_df = pd.read_csv(pytest.glob_path, nrows=nrows)

# Indexes get messed up when concatenating, so we reset both.
Expand All @@ -122,9 +122,16 @@ def test_read_multiple_csv_s3():
modin_df = pd.read_csv("S3://noaa-ghcn-pds/csv/178*.csv")

# We have to specify the columns because the column names are not identical
# across the files. Since we supply our own column names, we also skip each
# file's original header row.
pandas_dfs = [pandas.read_csv("s3://noaa-ghcn-pds/csv/178{}.csv".format(i), names=modin_df.columns, skiprows=[0]) for i in range(10)]
pandas_dfs = [
pandas.read_csv(
"s3://noaa-ghcn-pds/csv/178{}.csv".format(i),
names=modin_df.columns,
skiprows=[0],
)
for i in range(10)
]
pandas_df = pd.concat(pandas_dfs)

# Indexes get messed up when concatenating, so we reset both.
pandas_df = pandas_df.reset_index(drop=True)
modin_df = modin_df.reset_index(drop=True)
Expand Down

0 comments on commit 0cc367e

Please sign in to comment.