Skip to content

Commit

Permalink
Use fixture factory for make_table, make_schema, and `make_catalog` (#189)
Browse files Browse the repository at this point in the history
  • Loading branch information
nfx authored Sep 13, 2023
1 parent 0f15ab2 commit 77cb4c3
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 34 deletions.
44 changes: 11 additions & 33 deletions tests/integration/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,20 +62,12 @@ def sql_fetch_all(ws: WorkspaceClient):

@pytest.fixture
def make_catalog(sql_exec, make_random):
cleanup = []

def inner():
def create():
name = f"ucx_C{make_random(4)}".lower()
sql_exec(f"CREATE CATALOG {name}")
cleanup.append(name)
return name

yield inner
logger.debug(f"clearing {len(cleanup)} catalogs")
for name in cleanup:
logger.debug(f"removing {name} catalog")
sql_exec(f"DROP CATALOG IF EXISTS {name} CASCADE")
logger.debug(f"removed {len(cleanup)} catalogs")
yield from factory("catalog", create, lambda name: sql_exec(f"DROP CATALOG IF EXISTS {name} CASCADE")) # noqa: F405


def test_catalog_fixture(make_catalog):
Expand All @@ -85,20 +77,12 @@ def test_catalog_fixture(make_catalog):

@pytest.fixture
def make_schema(sql_exec, make_random):
cleanup = []

def inner(catalog="hive_metastore"):
def create(*, catalog="hive_metastore"):
name = f"{catalog}.ucx_S{make_random(4)}".lower()
sql_exec(f"CREATE SCHEMA {name}")
cleanup.append(name)
return name

yield inner
logger.debug(f"clearing {len(cleanup)} schemas")
for name in cleanup:
logger.debug(f"removing {name} schema")
sql_exec(f"DROP SCHEMA IF EXISTS {name} CASCADE")
logger.debug(f"removed {len(cleanup)} schemas")
yield from factory("schema", create, lambda name: sql_exec(f"DROP SCHEMA IF EXISTS {name} CASCADE")) # noqa: F405


def test_schema_fixture(make_schema):
Expand All @@ -108,14 +92,12 @@ def test_schema_fixture(make_schema):

@pytest.fixture
def make_table(sql_exec, make_schema, make_random):
cleanup = []

def inner(
def create(
*,
catalog="hive_metastore",
schema: str | None = None,
ctas: str | None = None,
non_detla: bool = False,
non_delta: bool = False,
external: bool = False,
view: bool = False,
):
Expand All @@ -126,7 +108,7 @@ def inner(
if ctas is not None:
# temporary (if not view)
ddl = f"{ddl} AS {ctas}"
elif non_detla:
elif non_delta:
location = "dbfs:/databricks-datasets/iot-stream/data-device"
ddl = f"{ddl} USING json LOCATION '{location}'"
elif external:
Expand All @@ -137,29 +119,25 @@ def inner(
# managed table
ddl = f"{ddl} (id INT, value STRING)"
sql_exec(ddl)
cleanup.append(name)
return name

yield inner

logger.debug(f"clearing {len(cleanup)} tables")
for name in cleanup:
logger.debug(f"removing {name} table")
def remove(name):
try:
sql_exec(f"DROP TABLE IF EXISTS {name}")
except RuntimeError as e:
if "Cannot drop a view" in str(e):
sql_exec(f"DROP VIEW IF EXISTS {name}")
else:
raise e
logger.debug(f"removed {len(cleanup)} tables")

yield from factory("table", create, remove) # noqa: F405


def test_table_fixture(make_table):
logger.info(f"Created new managed table in new schema: {make_table()}")
logger.info(f'Created new managed table in default schema: {make_table(schema="default")}')
logger.info(f"Created new external table in new schema: {make_table(external=True)}")
logger.info(f"Created new external JSON table in new schema: {make_table(non_detla=True)}")
logger.info(f"Created new external JSON table in new schema: {make_table(non_delta=True)}")
logger.info(f'Created new tmp table in new schema: {make_table(ctas="SELECT 2+2 AS four")}')
logger.info(f'Created new view in new schema: {make_table(view=True, ctas="SELECT 2+2 AS four")}')

Expand Down
2 changes: 1 addition & 1 deletion tests/integration/test_tacls.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def test_describe_all_tables_in_databases(ws: WorkspaceClient, make_catalog, mak
external_table = make_table(schema=schema_b, external=True)
tmp_table = make_table(schema=schema_a, ctas="SELECT 2+2 AS four")
view = make_table(schema=schema_b, ctas="SELECT 2+2 AS four", view=True)
non_delta = make_table(schema=schema_a, non_detla=True)
non_delta = make_table(schema=schema_a, non_delta=True)

logger.info(
f"managed_table={managed_table}, "
Expand Down

0 comments on commit 77cb4c3

Please sign in to comment.