diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 8ea00aaba93..efff5abecad 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -6,7 +6,7 @@ parse = (?P<major>\d+)
 	((?P<prekind>a|b|rc)
 	(?P<pre>\d+)  # pre-release version num
 	)?
-serialize = 
+serialize =
 	{major}.{minor}.{patch}{prekind}{pre}
 	{major}.{minor}.{patch}
 commit = False
@@ -15,7 +15,7 @@ tag = False
 [bumpversion:part:prekind]
 first_value = a
 optional_value = final
-values = 
+values =
 	a
 	b
 	rc
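
The `parse`/`serialize` pair above is what lets bumpversion round-trip version strings such as `1.0.4` or `1.1.0rc2`. A minimal sketch of that round trip, assuming the named groups mirror the `{major}.{minor}.{patch}{prekind}{pre}` serialize templates:

```python
# Minimal sketch; the group names are assumed from the serialize templates above.
import re

PARSE = re.compile(
    r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
    r"((?P<prekind>a|b|rc)(?P<pre>\d+))?"  # optional pre-release suffix
)

for version in ("1.0.4", "1.1.0rc2"):
    parts = PARSE.match(version).groupdict()
    if parts["prekind"]:  # pre-release: use the first serialize template
        out = "{major}.{minor}.{patch}{prekind}{pre}".format(**parts)
    else:  # final release: use the second template
        out = "{major}.{minor}.{patch}".format(**parts)
    assert out == version
```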
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index ecfe53dd4fb..750e985ee08 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,5 +1,5 @@
 # This file contains the code owners for the dbt-core repo.
-# PRs will be automatically assigned for review to the associated 
+# PRs will be automatically assigned for review to the associated
 # team(s) or person(s) that touches any files that are mapped to them.
 #
 # A statement takes precedence over the statements above it so more general
@@ -9,7 +9,7 @@
 # Consult GitHub documentation for formatting guidelines:
 # https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners#example-of-a-codeowners-file
 
-# As a default for areas with no assignment, 
+# As a default for areas with no assignment,
 # the core team as a whole will be assigned
 *       @dbt-labs/core
 
@@ -37,7 +37,7 @@
 /core/dbt/include/global_project    @dbt-labs/core-execution @dbt-labs/core-adapters
 
 # Perf regression testing framework
-# This excludes the test project files itself since those aren't specific 
+# This excludes the test project files itself since those aren't specific
 # framework changes (excluded by not setting an owner next to it- no owner)
 /performance @nathaniel-may
 /performance/projects
diff --git a/.github/actions/latest-wrangler/README.md b/.github/actions/latest-wrangler/README.md
index 867247b641d..9d5033259c0 100644
--- a/.github/actions/latest-wrangler/README.md
+++ b/.github/actions/latest-wrangler/README.md
@@ -9,7 +9,7 @@ Plug in the necessary inputs to determine if the container being built should be
 | `package` | Name of the GH package to check against |
 | `new_version` | Semver of new container |
 | `gh_token` | GH token with package read scope|
-| `halt_on_missing` | Return non-zero exit code if requested package does not exist. (defaults to false)| 
+| `halt_on_missing` | Return non-zero exit code if requested package does not exist. (defaults to false)|
 
 
 ## Outputs
@@ -21,7 +21,7 @@ Plug in the necessary inputs to determine if the container being built should be
 ## Example workflow
 ```yaml
 name: Ship it!
-on: 
+on:
   workflow_dispatch:
     inputs:
       package:
@@ -47,4 +47,4 @@ jobs:
       run: |
         echo "Is it latest?  Survey says: ${{ steps.is_latest.outputs.latest }} !"
         echo "Is it minor.latest?  Survey says: ${{ steps.is_latest.outputs.minor_latest }} !"
-```
\ No newline at end of file
+```
diff --git a/.github/actions/latest-wrangler/action.yml b/.github/actions/latest-wrangler/action.yml
index ca82956cbd8..d712eecf64e 100644
--- a/.github/actions/latest-wrangler/action.yml
+++ b/.github/actions/latest-wrangler/action.yml
@@ -7,7 +7,7 @@ inputs:
   new_version:
     description: "Semver of the container being built (I.E. 1.0.4)"
     required: true
-  gh_token: 
+  gh_token:
     description: "Auth token for github (must have view packages scope)"
     required: true
 outputs:
diff --git a/.github/actions/latest-wrangler/examples/example_workflow.yml b/.github/actions/latest-wrangler/examples/example_workflow.yml
index 2a08fb54e49..66b171c434b 100644
--- a/.github/actions/latest-wrangler/examples/example_workflow.yml
+++ b/.github/actions/latest-wrangler/examples/example_workflow.yml
@@ -1,5 +1,5 @@
 name: Ship it!
-on: 
+on:
   workflow_dispatch:
     inputs:
       package:
@@ -23,4 +23,4 @@ jobs:
         gh_token: ${{ secrets.GITHUB_TOKEN }}
     - name: Print the results
       run: |
-        echo "Is it latest?  Survey says: ${{ steps.is_latest.outputs.latest }} !"
\ No newline at end of file
+        echo "Is it latest?  Survey says: ${{ steps.is_latest.outputs.latest }} !"
diff --git a/.github/actions/latest-wrangler/examples/example_workflow_dispatch.json b/.github/actions/latest-wrangler/examples/example_workflow_dispatch.json
index 6330dfa91a9..29667a4a167 100644
--- a/.github/actions/latest-wrangler/examples/example_workflow_dispatch.json
+++ b/.github/actions/latest-wrangler/examples/example_workflow_dispatch.json
@@ -3,4 +3,4 @@
       "version_number": "1.0.1",
       "package": "dbt-redshift"
     }
-}
\ No newline at end of file
+}
diff --git a/.github/actions/latest-wrangler/main.py b/.github/actions/latest-wrangler/main.py
index 549ec384631..23e14cf5abe 100644
--- a/.github/actions/latest-wrangler/main.py
+++ b/.github/actions/latest-wrangler/main.py
@@ -23,7 +23,7 @@
     # Log info if we don't get a 200
     if package_request.status_code != 200:
         print(f"Call to GH API failed: {package_request.status_code} {package_meta['message']}")
-    
+
     # Make an early exit if there is no matching package in github
     if package_request.status_code == 404:
         if halt_on_missing:
@@ -35,12 +35,11 @@
             sys.exit(0)
 
     # TODO: verify package meta is "correct"
-    # https://github.com/dbt-labs/dbt-core/issues/4640 
+    # https://github.com/dbt-labs/dbt-core/issues/4640
 
     # map versions and tags
     version_tag_map = {
-        version["id"]: version["metadata"]["container"]["tags"]
-        for version in package_meta
+        version["id"]: version["metadata"]["container"]["tags"] for version in package_meta
     }
 
     # is pre-release
@@ -63,9 +62,7 @@
         if f"{new_version.major}.{new_version.minor}.latest" in tags:
             # Similar to above, only now we expect exactly two tags:
             # major.minor.patch and major.minor.latest
-            current_minor_latest = parse(
-                [tag for tag in tags if "latest" not in tag][0]
-            )
+            current_minor_latest = parse([tag for tag in tags if "latest" not in tag][0])
         else:
             current_minor_latest = False
 
@@ -79,7 +76,8 @@ def is_latest(
 
         :param pre_rel: Whether or not the version of the new container is a pre-release
         :param new_version: The version of the new container
-        :param remote_latest: The version of the previously identified container that's already tagged latest or False
+        :param remote_latest: The version of the previously identified container that's
+            already tagged latest or False
         """
         # is a pre-release = not latest
         if pre_rel:
@@ -95,4 +93,3 @@ def is_latest(
 
     print(f"::set-output name=latest::{latest}")
     print(f"::set-output name=minor_latest::{minor_latest}")
-    
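
The hunks above only reformat `is_latest`, but its decision rule is worth spelling out: a pre-release is never `latest`, a package with no existing `latest` tag wins by default, and otherwise the higher semver wins. A standalone sketch of that rule (the GitHub API plumbing and `::set-output` writing are omitted, so this is illustrative rather than the full script):

```python
# Illustrative sketch of the comparison rule in is_latest above.
from packaging.version import parse

def is_latest(pre_rel: bool, new_version, remote_latest) -> bool:
    if pre_rel:
        return False  # a pre-release is never tagged latest
    if remote_latest is False:
        return True  # no container is tagged latest yet, so the new one wins
    return new_version > remote_latest  # otherwise the higher semver wins

assert is_latest(False, parse("1.0.2"), parse("1.0.1"))
assert not is_latest(True, parse("1.1.0rc1"), parse("1.0.1"))
assert is_latest(False, parse("1.1.0"), False)
```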
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
index 21c42fa7062..f0fa4dceaaf 100644
--- a/.github/workflows/backport.yml
+++ b/.github/workflows/backport.yml
@@ -1,5 +1,5 @@
 # **what?**
-# When a PR is merged, if it has the backport label, it will create 
+# When a PR is merged, if it has the backport label, it will create
 # a new PR to backport those changes to the given branch. If it can't
 # cleanly do a backport, it will comment on the merged PR of the failure.
 #
diff --git a/.github/workflows/jira-creation.yml b/.github/workflows/jira-creation.yml
index c84e106a75d..b4016befce0 100644
--- a/.github/workflows/jira-creation.yml
+++ b/.github/workflows/jira-creation.yml
@@ -13,7 +13,7 @@ name: Jira Issue Creation
 on:
   issues:
     types: [opened, labeled]
-    
+
 permissions:
   issues: write
 
diff --git a/.github/workflows/jira-label.yml b/.github/workflows/jira-label.yml
index fd533a170fe..3da2e3a3867 100644
--- a/.github/workflows/jira-label.yml
+++ b/.github/workflows/jira-label.yml
@@ -13,7 +13,7 @@ name: Jira Label Mirroring
 on:
   issues:
     types: [labeled, unlabeled]
-    
+
 permissions:
   issues: read
 
@@ -24,4 +24,3 @@ jobs:
       JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
       JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
       JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
-    
diff --git a/.github/workflows/jira-transition.yml b/.github/workflows/jira-transition.yml
index 71273c7a9c9..ed9f9cd4fc7 100644
--- a/.github/workflows/jira-transition.yml
+++ b/.github/workflows/jira-transition.yml
@@ -21,4 +21,4 @@ jobs:
     secrets:
       JIRA_BASE_URL: ${{ secrets.JIRA_BASE_URL }}
       JIRA_USER_EMAIL: ${{ secrets.JIRA_USER_EMAIL }}
-      JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
\ No newline at end of file
+      JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 26f7d409ec5..842cedf6ffb 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -1,5 +1,5 @@
 # **what?**
-# Take the given commit, run unit tests specifically on that sha, build and 
+# Take the given commit, run unit tests specifically on that sha, build and
 # package it, and then release to GitHub and PyPi with that specific build
 
 # **why?**
@@ -144,7 +144,7 @@ jobs:
 
 
   github-release:
-    name: GitHub Release 
+    name: GitHub Release
 
     needs: test-build
 
@@ -155,7 +155,7 @@ jobs:
         with:
           name: dist
           path: '.'
-          
+
       # Need to set an output variable because env variables can't be taken as input
       # This is needed for the next step with releasing to GitHub
       - name: Find release type
@@ -179,7 +179,7 @@ jobs:
             dbt_core-${{github.event.inputs.version_number}}-py3-none-any.whl
             dbt-postgres-${{github.event.inputs.version_number}}.tar.gz
             dbt-core-${{github.event.inputs.version_number}}.tar.gz
-          
+
   pypi-release:
     name: Pypi release
 
@@ -188,12 +188,12 @@ jobs:
     needs: github-release
 
     environment: PypiProd
-    steps:    
+    steps:
       - uses: actions/download-artifact@v2
         with:
           name: dist
           path: 'dist'
-      
+
       - name: Publish distribution to PyPI
         uses: pypa/gh-action-pypi-publish@v1.4.2
         with:
diff --git a/.github/workflows/release_docker.yml b/.github/workflows/release_docker.yml
index 67f121f1bd0..1a955e3a57e 100644
--- a/.github/workflows/release_docker.yml
+++ b/.github/workflows/release_docker.yml
@@ -5,7 +5,7 @@
 # Docker images for dbt are used in a number of important places throughout the dbt ecosystem.  This is how we keep those images up-to-date.
 
 # **when?**
-# This is triggered manually 
+# This is triggered manually
 
 # **next steps**
 # - build this into the release workflow (or conversely, break out the different release methods into their own workflow files)
@@ -55,7 +55,7 @@ jobs:
     name: Set up docker image builder
     runs-on: ubuntu-latest
     needs: [get_version_meta]
-    steps:      
+    steps:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v1
 
@@ -110,4 +110,4 @@ jobs:
           build-args: |
             ${{ steps.build_arg.outputs.build_arg_name }}_ref=${{ steps.build_arg.outputs.build_arg_value }}@v${{ github.event.inputs.version_number }}
           tags: |
-            ghcr.io/dbt-labs/${{ github.event.inputs.package }}:latest
\ No newline at end of file
+            ghcr.io/dbt-labs/${{ github.event.inputs.package }}:latest
diff --git a/.github/workflows/schema-check.yml b/.github/workflows/schema-check.yml
index c647cc2d201..ee65ff71296 100644
--- a/.github/workflows/schema-check.yml
+++ b/.github/workflows/schema-check.yml
@@ -1,5 +1,5 @@
 # **what?**
-# Compares the schema of the dbt version of the given ref vs 
+# Compares the schema of the dbt version of the given ref vs
 # the latest official schema releases found in schemas.getdbt.com.
 # If there are differences, the workflow will fail and upload the
 # diff as an artifact. The metadata team should be alerted to the change.
@@ -37,20 +37,20 @@ jobs:
         uses: actions/setup-python@v2
         with:
           python-version: 3.8
-                
+
       - name: Checkout dbt repo
         uses: actions/checkout@v2.3.4
         with:
             path: ${{ env.DBT_REPO_DIRECTORY }}
-                       
+
       - name: Checkout schemas.getdbt.com repo
-        uses: actions/checkout@v2.3.4    
-        with: 
+        uses: actions/checkout@v2.3.4
+        with:
           repository: dbt-labs/schemas.getdbt.com
           ref: 'main'
           ssh-key: ${{ secrets.SCHEMA_SSH_PRIVATE_KEY }}
           path: ${{ env.SCHEMA_REPO_DIRECTORY }}
-        
+
       - name: Generate current schema
         run: |
           cd ${{ env.DBT_REPO_DIRECTORY }}
@@ -59,7 +59,7 @@ jobs:
           pip install --upgrade pip
           pip install -r dev-requirements.txt -r editable-requirements.txt
           python scripts/collect-artifact-schema.py --path ${{ env.LATEST_SCHEMA_PATH }}
-          
+
       # Copy generated schema files into the schemas.getdbt.com repo
       # Do a git diff to find any changes
       # Ignore any date or version changes though
diff --git a/.github/workflows/structured-logging-schema-check.yml b/.github/workflows/structured-logging-schema-check.yml
index 96b613764a1..cfdb479f212 100644
--- a/.github/workflows/structured-logging-schema-check.yml
+++ b/.github/workflows/structured-logging-schema-check.yml
@@ -1,6 +1,6 @@
 # This Action makes a dbt run to sample json structured logs
 # and checks that they conform to the currently documented schema.
-# 
+#
 # If this action fails it either means we have unintentionally deviated
 # from our documented structured logging schema, or we need to bump the
 # version of our structured logging and add new documentation to
diff --git a/.github/workflows/test/.actrc b/.github/workflows/test/.actrc
index 21448263dc1..027d95f14ff 100644
--- a/.github/workflows/test/.actrc
+++ b/.github/workflows/test/.actrc
@@ -1 +1 @@
--P ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-latest
\ No newline at end of file
+-P ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-latest
diff --git a/.github/workflows/test/.gitignore b/.github/workflows/test/.gitignore
index b4ddc884c6b..1233aaed111 100644
--- a/.github/workflows/test/.gitignore
+++ b/.github/workflows/test/.gitignore
@@ -1 +1 @@
-.secrets
\ No newline at end of file
+.secrets
diff --git a/.github/workflows/test/.secrets.EXAMPLE b/.github/workflows/test/.secrets.EXAMPLE
index 3e790800a7e..9b3e0acc9c1 100644
--- a/.github/workflows/test/.secrets.EXAMPLE
+++ b/.github/workflows/test/.secrets.EXAMPLE
@@ -1 +1 @@
-GITHUB_TOKEN=GH_PERSONAL_ACCESS_TOKEN_GOES_HERE
\ No newline at end of file
+GITHUB_TOKEN=GH_PERSONAL_ACCESS_TOKEN_GOES_HERE
diff --git a/.github/workflows/test/inputs/release_docker.json b/.github/workflows/test/inputs/release_docker.json
index a219c00de6e..f5bbcb176ba 100644
--- a/.github/workflows/test/inputs/release_docker.json
+++ b/.github/workflows/test/inputs/release_docker.json
@@ -3,4 +3,4 @@
       "version_number": "1.0.1",
       "package": "dbt-postgres"
     }
-}
\ No newline at end of file
+}
diff --git a/.github/workflows/version-bump.yml b/.github/workflows/version-bump.yml
index 9860fe3a192..0bfce106b6b 100644
--- a/.github/workflows/version-bump.yml
+++ b/.github/workflows/version-bump.yml
@@ -1,16 +1,16 @@
 # **what?**
 # This workflow will take a version number and a dry run flag. With that
-# it will run versionbump to update the version number everywhere in the 
+# it will run versionbump to update the version number everywhere in the
 # code base and then generate an updated Docker requirements file. If this
 # is a dry run, a draft PR will open with the changes. If this isn't a dry
 # run, the changes will be committed to the branch this is run on.
 
 # **why?**
-# This is to aid in releasing dbt and making sure we have updated 
+# This is to aid in releasing dbt and making sure we have updated
 # the versions and Docker requirements in all places.
 
 # **when?**
-# This is triggered either manually OR 
+# This is triggered either manually OR
 # from the repository_dispatch event "version-bump" which is sent from
 # the dbt-release repo Action
 
@@ -25,10 +25,10 @@ on:
       is_dry_run:
        description: 'Creates a draft PR to allow testing instead of committing to a branch'
        required: true
-       default: 'true'  
+       default: 'true'
   repository_dispatch:
     types: [version-bump]
-  
+
 jobs:
   bump:
     runs-on: ubuntu-latest
@@ -57,26 +57,26 @@ jobs:
         run: |
           python3 -m venv env
           source env/bin/activate
-          pip install --upgrade pip     
-          
+          pip install --upgrade pip
+
       - name: Create PR branch
         if: ${{ steps.variables.outputs.IS_DRY_RUN == 'true' }}
         run: |
           git checkout -b bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID
           git push origin bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID
           git branch --set-upstream-to=origin/bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID bumping-version/${{steps.variables.outputs.VERSION_NUMBER}}_$GITHUB_RUN_ID
-      
+
       # - name: Generate Docker requirements
       #  run: |
       #    source env/bin/activate
-      #    pip install -r requirements.txt 
+      #    pip install -r requirements.txt
       #    pip freeze -l > docker/requirements/requirements.txt
       #    git status
 
       - name: Bump version
         run: |
           source env/bin/activate
-          pip install -r dev-requirements.txt 
+          pip install -r dev-requirements.txt
           env/bin/bumpversion --allow-dirty --new-version ${{steps.variables.outputs.VERSION_NUMBER}} major
           git status
 
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e4ecd165874..bd2e6326205 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@
 - Clean up test deprecation warnings ([#3988](https://github.com/dbt-labs/dbt-core/issue/3988), [#4556](https://github.com/dbt-labs/dbt-core/pull/4556))
 - Use mashumaro for serialization in event logging ([#4504](https://github.com/dbt-labs/dbt-core/issues/4504), [#4505](https://github.com/dbt-labs/dbt-core/pull/4505))
 - Drop support for Python 3.7.0 + 3.7.1 ([#4584](https://github.com/dbt-labs/dbt-core/issues/4584), [#4585](https://github.com/dbt-labs/dbt-core/pull/4585), [#4643](https://github.com/dbt-labs/dbt-core/pull/4643))
+- Re-format codebase (except tests) using pre-commit hooks ([#3195](https://github.com/dbt-labs/dbt-core/issues/3195), [#4697](https://github.com/dbt-labs/dbt-core/pull/4697))
 
 Contributors:
 - [@NiallRees](https://github.com/NiallRees) ([#4447](https://github.com/dbt-labs/dbt-core/pull/4447))
@@ -28,7 +29,7 @@ Contributors:
 - Projects created using `dbt init` now have the correct `seeds` directory created (instead of `data`) ([#4588](https://github.com/dbt-labs/dbt-core/issues/4588), [#4599](https://github.com/dbt-labs/dbt-core/pull/4589))
 - Don't require a profile for dbt deps and clean commands ([#4554](https://github.com/dbt-labs/dbt-core/issues/4554), [#4610](https://github.com/dbt-labs/dbt-core/pull/4610))
 - Select modified.body works correctly when new model added([#4570](https://github.com/dbt-labs/dbt-core/issues/4570), [#4631](https://github.com/dbt-labs/dbt-core/pull/4631))
-- Fix bug in retry logic for bad response from hub and when there is a bad git tarball download. ([#4577](https://github.com/dbt-labs/dbt-core/issues/4577), [#4579](https://github.com/dbt-labs/dbt-core/issues/4579), [#4609](https://github.com/dbt-labs/dbt-core/pull/4609)) 
+- Fix bug in retry logic for bad response from hub and when there is a bad git tarball download. ([#4577](https://github.com/dbt-labs/dbt-core/issues/4577), [#4579](https://github.com/dbt-labs/dbt-core/issues/4579), [#4609](https://github.com/dbt-labs/dbt-core/pull/4609))
 - Restore previous log level (DEBUG) when a test depends on a disabled resource. Still WARN if the resource is missing ([#4594](https://github.com/dbt-labs/dbt-core/issues/4594), [#4647](https://github.com/dbt-labs/dbt-core/pull/4647))
 - Add project name validation to `dbt init` ([#4490](https://github.com/dbt-labs/dbt-core/issues/4490),[#4536](https://github.com/dbt-labs/dbt-core/pull/4536))
 - Support click versions in the v7.x series ([#4681](https://github.com/dbt-labs/dbt-core/pull/4681))
diff --git a/Makefile b/Makefile
index db22e88d3ea..b842df5572e 100644
--- a/Makefile
+++ b/Makefile
@@ -85,4 +85,3 @@ help: ## Show this help message.
 	@echo
 	@echo 'options:'
 	@echo 'use USE_DOCKER=true to run target in a docker container'
-
diff --git a/core/dbt/README.md b/core/dbt/README.md
index 7884072a7ce..5886bf37525 100644
--- a/core/dbt/README.md
+++ b/core/dbt/README.md
@@ -4,11 +4,11 @@
 
 ### deprecations.py
 
-### flags.py		
+### flags.py
 
-### main.py			
+### main.py
 
-### tracking.py		
+### tracking.py
 
 ### version.py
 
@@ -16,25 +16,25 @@
 
 ### node_types.py
 
-### helper_types.py		
+### helper_types.py
 
-### links.py		
+### links.py
 
-### semver.py		
+### semver.py
 
 ### ui.py
 
-### compilation.py		
+### compilation.py
 
-### dataclass_schema.py	
+### dataclass_schema.py
 
-### exceptions.py		
+### exceptions.py
 
-### hooks.py		
+### hooks.py
 
-### logger.py		
+### logger.py
 
-### profiler.py		
+### profiler.py
 
 ### utils.py
 
@@ -49,4 +49,3 @@
 * task
 * clients
 * events
-
diff --git a/core/dbt/adapters/base/column.py b/core/dbt/adapters/base/column.py
index df0319c3d60..46cfbaa4a49 100644
--- a/core/dbt/adapters/base/column.py
+++ b/core/dbt/adapters/base/column.py
@@ -8,10 +8,10 @@
 @dataclass
 class Column:
     TYPE_LABELS: ClassVar[Dict[str, str]] = {
-        'STRING': 'TEXT',
-        'TIMESTAMP': 'TIMESTAMP',
-        'FLOAT': 'FLOAT',
-        'INTEGER': 'INT'
+        "STRING": "TEXT",
+        "TIMESTAMP": "TIMESTAMP",
+        "FLOAT": "FLOAT",
+        "INTEGER": "INT",
     }
     column: str
     dtype: str
@@ -24,7 +24,7 @@ def translate_type(cls, dtype: str) -> str:
         return cls.TYPE_LABELS.get(dtype.upper(), dtype)
 
     @classmethod
-    def create(cls, name, label_or_dtype: str) -> 'Column':
+    def create(cls, name, label_or_dtype: str) -> "Column":
         column_type = cls.translate_type(label_or_dtype)
         return cls(name, column_type)
 
@@ -41,14 +41,12 @@ def data_type(self) -> str:
         if self.is_string():
             return Column.string_type(self.string_size())
         elif self.is_numeric():
-            return Column.numeric_type(self.dtype, self.numeric_precision,
-                                       self.numeric_scale)
+            return Column.numeric_type(self.dtype, self.numeric_precision, self.numeric_scale)
         else:
             return self.dtype
 
     def is_string(self) -> bool:
-        return self.dtype.lower() in ['text', 'character varying', 'character',
-                                      'varchar']
+        return self.dtype.lower() in ["text", "character varying", "character", "varchar"]
 
     def is_number(self):
         return any([self.is_integer(), self.is_numeric(), self.is_float()])
@@ -56,33 +54,45 @@ def is_number(self):
     def is_float(self):
         return self.dtype.lower() in [
             # floats
-            'real', 'float4', 'float', 'double precision', 'float8'
+            "real",
+            "float4",
+            "float",
+            "double precision",
+            "float8",
         ]
 
     def is_integer(self) -> bool:
         return self.dtype.lower() in [
             # real types
-            'smallint', 'integer', 'bigint',
-            'smallserial', 'serial', 'bigserial',
+            "smallint",
+            "integer",
+            "bigint",
+            "smallserial",
+            "serial",
+            "bigserial",
             # aliases
-            'int2', 'int4', 'int8',
-            'serial2', 'serial4', 'serial8',
+            "int2",
+            "int4",
+            "int8",
+            "serial2",
+            "serial4",
+            "serial8",
         ]
 
     def is_numeric(self) -> bool:
-        return self.dtype.lower() in ['numeric', 'decimal']
+        return self.dtype.lower() in ["numeric", "decimal"]
 
     def string_size(self) -> int:
         if not self.is_string():
             raise RuntimeException("Called string_size() on non-string field!")
 
-        if self.dtype == 'text' or self.char_size is None:
+        if self.dtype == "text" or self.char_size is None:
             # char_size should never be None. Handle it reasonably just in case
             return 256
         else:
             return int(self.char_size)
 
-    def can_expand_to(self, other_column: 'Column') -> bool:
+    def can_expand_to(self, other_column: "Column") -> bool:
         """returns True if this column can be expanded to the size of the
         other column"""
         if not self.is_string() or not other_column.is_string():
@@ -110,12 +120,10 @@ def __repr__(self) -> str:
         return "".format(self.name, self.data_type)
 
     @classmethod
-    def from_description(cls, name: str, raw_data_type: str) -> 'Column':
-        match = re.match(r'([^(]+)(\([^)]+\))?', raw_data_type)
+    def from_description(cls, name: str, raw_data_type: str) -> "Column":
+        match = re.match(r"([^(]+)(\([^)]+\))?", raw_data_type)
         if match is None:
-            raise RuntimeException(
-                f'Could not interpret data type "{raw_data_type}"'
-            )
+            raise RuntimeException(f'Could not interpret data type "{raw_data_type}"')
         data_type, size_info = match.groups()
         char_size = None
         numeric_precision = None
@@ -123,7 +131,7 @@ def from_description(cls, name: str, raw_data_type: str) -> 'Column':
         if size_info is not None:
             # strip out the parentheses
             size_info = size_info[1:-1]
-            parts = size_info.split(',')
+            parts = size_info.split(",")
             if len(parts) == 1:
                 try:
                     char_size = int(parts[0])
@@ -148,6 +156,4 @@ def from_description(cls, name: str, raw_data_type: str) -> 'Column':
                         f'could not convert "{parts[1]}" to an integer'
                     )
 
-        return cls(
-            name, data_type, char_size, numeric_precision, numeric_scale
-        )
+        return cls(name, data_type, char_size, numeric_precision, numeric_scale)
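
`from_description` above splits a raw type such as `numeric(10,2)` with the regex `([^(]+)(\([^)]+\))?`: one number inside the parentheses is read as a char size, two as numeric precision and scale. A quick usage sketch (assumes a dbt-core checkout on the import path; expected values follow the parsing logic shown above):

```python
# Usage sketch for Column.from_description as reformatted above.
from dbt.adapters.base.column import Column

col = Column.from_description("amount", "numeric(10,2)")
assert (col.dtype, col.numeric_precision, col.numeric_scale) == ("numeric", 10, 2)

name_col = Column.from_description("name", "character varying(256)")
assert name_col.char_size == 256 and name_col.is_string()
```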
diff --git a/core/dbt/adapters/base/connections.py b/core/dbt/adapters/base/connections.py
index 218aa287bf9..b540c54f1f3 100644
--- a/core/dbt/adapters/base/connections.py
+++ b/core/dbt/adapters/base/connections.py
@@ -1,18 +1,21 @@
 import abc
 import os
+
 # multiprocessing.RLock is a function returning this type
 from multiprocessing.synchronize import RLock
 from threading import get_ident
-from typing import (
-    Dict, Tuple, Hashable, Optional, ContextManager, List, Union
-)
+from typing import Dict, Tuple, Hashable, Optional, ContextManager, List, Union
 
 import agate
 
 import dbt.exceptions
 from dbt.contracts.connection import (
-    Connection, Identifier, ConnectionState,
-    AdapterRequiredConfig, LazyHandle, AdapterResponse
+    Connection,
+    Identifier,
+    ConnectionState,
+    AdapterRequiredConfig,
+    LazyHandle,
+    AdapterResponse,
 )
 from dbt.contracts.graph.manifest import Manifest
 from dbt.adapters.base.query_headers import (
@@ -27,7 +30,7 @@
     ConnectionClosed,
     ConnectionClosed2,
     Rollback,
-    RollbackFailed
+    RollbackFailed,
 )
 from dbt import flags
 
@@ -45,6 +48,7 @@ class BaseConnectionManager(metaclass=abc.ABCMeta):
     You must also set the 'TYPE' class attribute with a class-unique constant
     string.
     """
+
     TYPE: str = NotImplemented
 
     def __init__(self, profile: AdapterRequiredConfig):
@@ -66,16 +70,14 @@ def get_thread_connection(self) -> Connection:
         key = self.get_thread_identifier()
         with self.lock:
             if key not in self.thread_connections:
-                raise dbt.exceptions.InvalidConnectionException(
-                    key, list(self.thread_connections)
-                )
+                raise dbt.exceptions.InvalidConnectionException(key, list(self.thread_connections))
             return self.thread_connections[key]
 
     def set_thread_connection(self, conn: Connection) -> None:
         key = self.get_thread_identifier()
         if key in self.thread_connections:
             raise dbt.exceptions.InternalException(
-                'In set_thread_connection, existing connection exists for {}'
+                "In set_thread_connection, existing connection exists for {}"
             )
         self.thread_connections[key] = conn
 
@@ -115,18 +117,19 @@ def exception_handler(self, sql: str) -> ContextManager:
             underlying database.
         """
         raise dbt.exceptions.NotImplementedException(
-            '`exception_handler` is not implemented for this adapter!')
+            "`exception_handler` is not implemented for this adapter!"
+        )
 
     def set_connection_name(self, name: Optional[str] = None) -> Connection:
         conn_name: str
         if name is None:
             # if a name isn't specified, we'll re-use a single handle
             # named 'master'
-            conn_name = 'master'
+            conn_name = "master"
         else:
             if not isinstance(name, str):
                 raise dbt.exceptions.CompilerException(
-                    f'For connection name, got {name} - not a string!'
+                    f"For connection name, got {name} - not a string!"
                 )
             assert isinstance(name, str)
             conn_name = name
@@ -139,16 +142,16 @@ def set_connection_name(self, name: Optional[str] = None) -> Connection:
                 state=ConnectionState.INIT,
                 transaction_open=False,
                 handle=None,
-                credentials=self.profile.credentials
+                credentials=self.profile.credentials,
             )
             self.set_thread_connection(conn)
 
-        if conn.name == conn_name and conn.state == 'open':
+        if conn.name == conn_name and conn.state == "open":
             return conn
 
         fire_event(NewConnection(conn_name=conn_name, conn_type=self.TYPE))
 
-        if conn.state == 'open':
+        if conn.state == "open":
             fire_event(ConnectionReused(conn_name=conn_name))
         else:
             conn.handle = LazyHandle(self.open)
@@ -160,7 +163,7 @@ def set_connection_name(self, name: Optional[str] = None) -> Connection:
     def cancel_open(self) -> Optional[List[str]]:
         """Cancel all open connections on the adapter. (passable)"""
         raise dbt.exceptions.NotImplementedException(
-            '`cancel_open` is not implemented for this adapter!'
+            "`cancel_open` is not implemented for this adapter!"
         )
 
     @abc.abstractclassmethod
@@ -173,9 +176,7 @@ def open(cls, connection: Connection) -> Connection:
         This should be thread-safe, or hold the lock if necessary. The given
         connection should not be in either in_use or available.
         """
-        raise dbt.exceptions.NotImplementedException(
-            '`open` is not implemented for this adapter!'
-        )
+        raise dbt.exceptions.NotImplementedException("`open` is not implemented for this adapter!")
 
     def release(self) -> None:
         with self.lock:
@@ -195,7 +196,7 @@ def release(self) -> None:
     def cleanup_all(self) -> None:
         with self.lock:
             for connection in self.thread_connections.values():
-                if connection.state not in {'closed', 'init'}:
+                if connection.state not in {"closed", "init"}:
                     fire_event(ConnectionLeftOpen(conn_name=connection.name))
                 else:
                     fire_event(ConnectionClosed(conn_name=connection.name))
@@ -208,14 +209,14 @@ def cleanup_all(self) -> None:
     def begin(self) -> None:
         """Begin a transaction. (passable)"""
         raise dbt.exceptions.NotImplementedException(
-            '`begin` is not implemented for this adapter!'
+            "`begin` is not implemented for this adapter!"
         )
 
     @abc.abstractmethod
     def commit(self) -> None:
         """Commit a transaction. (passable)"""
         raise dbt.exceptions.NotImplementedException(
-            '`commit` is not implemented for this adapter!'
+            "`commit` is not implemented for this adapter!"
         )
 
     @classmethod
@@ -230,7 +231,7 @@ def _rollback_handle(cls, connection: Connection) -> None:
     def _close_handle(cls, connection: Connection) -> None:
         """Perform the actual close operation."""
         # On windows, sometimes connection handles don't have a close() attr.
-        if hasattr(connection.handle, 'close'):
+        if hasattr(connection.handle, "close"):
             fire_event(ConnectionClosed2(conn_name=connection.name))
             connection.handle.close()
         else:
@@ -241,7 +242,7 @@ def _rollback(cls, connection: Connection) -> None:
         """Roll back the given connection."""
         if connection.transaction_open is False:
             raise dbt.exceptions.InternalException(
-                f'Tried to rollback transaction on connection '
+                f"Tried to rollback transaction on connection "
                 f'"{connection.name}", but it does not have one open!'
             )
 
@@ -291,5 +292,5 @@ def execute(
         :rtype: Tuple[Union[str, AdapterResponse], agate.Table]
         """
         raise dbt.exceptions.NotImplementedException(
-            '`execute` is not implemented for this adapter!'
+            "`execute` is not implemented for this adapter!"
         )
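
`BaseConnectionManager` above keys one connection per thread: `set_thread_connection` registers the handle under the thread identifier and refuses duplicates, and `get_thread_connection` raises if the current thread has none, all under a shared lock. A self-contained sketch of that registry pattern (names are illustrative, not dbt's actual API):

```python
# Self-contained sketch of the thread-keyed connection registry pattern above.
import threading

class ConnectionRegistry:
    def __init__(self):
        self.lock = threading.RLock()
        self.thread_connections = {}

    def set_thread_connection(self, conn):
        key = threading.get_ident()
        with self.lock:
            if key in self.thread_connections:
                raise RuntimeError(f"connection already exists for thread {key}")
            self.thread_connections[key] = conn

    def get_thread_connection(self):
        key = threading.get_ident()
        with self.lock:
            if key not in self.thread_connections:
                raise KeyError(f"no connection for thread {key}")
            return self.thread_connections[key]

registry = ConnectionRegistry()
registry.set_thread_connection("main-thread-handle")
assert registry.get_thread_connection() == "main-thread-handle"
```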
diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py
index d55de305d7a..308c8205c58 100644
--- a/core/dbt/adapters/base/impl.py
+++ b/core/dbt/adapters/base/impl.py
@@ -4,17 +4,31 @@
 from datetime import datetime
 from itertools import chain
 from typing import (
-    Optional, Tuple, Callable, Iterable, Type, Dict, Any, List, Mapping,
-    Iterator, Union, Set
+    Optional,
+    Tuple,
+    Callable,
+    Iterable,
+    Type,
+    Dict,
+    Any,
+    List,
+    Mapping,
+    Iterator,
+    Union,
+    Set,
 )
 
 import agate
 import pytz
 
 from dbt.exceptions import (
-    raise_database_error, raise_compiler_error, invalid_type_error,
+    raise_database_error,
+    raise_compiler_error,
+    invalid_type_error,
     get_relation_returned_multiple_results,
-    InternalException, NotImplementedException, RuntimeException,
+    InternalException,
+    NotImplementedException,
+    RuntimeException,
 )
 
 from dbt.adapters.protocol import (
@@ -23,9 +37,7 @@
 )
 from dbt.clients.agate_helper import empty_table, merge_tables, table_from_rows
 from dbt.clients.jinja import MacroGenerator
-from dbt.contracts.graph.compiled import (
-    CompileResultNode, CompiledSeedNode
-)
+from dbt.contracts.graph.compiled import CompileResultNode, CompiledSeedNode
 from dbt.contracts.graph.manifest import Manifest, MacroManifest
 from dbt.contracts.graph.parsed import ParsedSeedNode
 from dbt.exceptions import warn_or_error
@@ -36,7 +48,10 @@
 from dbt.adapters.base.connections import Connection, AdapterResponse
 from dbt.adapters.base.meta import AdapterMeta, available
 from dbt.adapters.base.relation import (
-    ComponentName, BaseRelation, InformationSchema, SchemaSearchMap
+    ComponentName,
+    BaseRelation,
+    InformationSchema,
+    SchemaSearchMap,
 )
 from dbt.adapters.base import Column as BaseColumn
 from dbt.adapters.cache import RelationsCache, _make_key
@@ -45,15 +60,14 @@
 SeedModel = Union[ParsedSeedNode, CompiledSeedNode]
 
 
-GET_CATALOG_MACRO_NAME = 'get_catalog'
-FRESHNESS_MACRO_NAME = 'collect_freshness'
+GET_CATALOG_MACRO_NAME = "get_catalog"
+FRESHNESS_MACRO_NAME = "collect_freshness"
 
 
 def _expect_row_value(key: str, row: agate.Row):
     if key not in row.keys():
         raise InternalException(
-            'Got a row without "{}" column, columns: {}'
-            .format(key, row.keys())
+            'Got a row without "{}" column, columns: {}'.format(key, row.keys())
         )
     return row[key]
 
@@ -62,40 +76,37 @@ def _catalog_filter_schemas(manifest: Manifest) -> Callable[[agate.Row], bool]:
     """Return a function that takes a row and decides if the row should be
     included in the catalog output.
     """
-    schemas = frozenset((d.lower(), s.lower())
-                        for d, s in manifest.get_used_schemas())
+    schemas = frozenset((d.lower(), s.lower()) for d, s in manifest.get_used_schemas())
 
     def test(row: agate.Row) -> bool:
-        table_database = _expect_row_value('table_database', row)
-        table_schema = _expect_row_value('table_schema', row)
+        table_database = _expect_row_value("table_database", row)
+        table_schema = _expect_row_value("table_schema", row)
         # the schema may be present but None, which is not an error and should
         # be filtered out
         if table_schema is None:
             return False
         return (table_database.lower(), table_schema.lower()) in schemas
+
     return test
 
 
-def _utc(
-    dt: Optional[datetime], source: BaseRelation, field_name: str
-) -> datetime:
+def _utc(dt: Optional[datetime], source: BaseRelation, field_name: str) -> datetime:
     """If dt has a timezone, return a new datetime that's in UTC. Otherwise,
     assume the datetime is already for UTC and add the timezone.
     """
     if dt is None:
         raise raise_database_error(
             "Expected a non-null value when querying field '{}' of table "
-            " {} but received value 'null' instead".format(
-                field_name,
-                source))
+            " {} but received value 'null' instead".format(field_name, source)
+        )
 
-    elif not hasattr(dt, 'tzinfo'):
+    elif not hasattr(dt, "tzinfo"):
         raise raise_database_error(
             "Expected a timestamp value when querying field '{}' of table "
             "{} but received value of type '{}' instead".format(
-                field_name,
-                source,
-                type(dt).__name__))
+                field_name, source, type(dt).__name__
+            )
+        )
 
     elif dt.tzinfo:
         return dt.astimezone(pytz.UTC)
@@ -105,7 +116,7 @@ def _utc(
 
 def _relation_name(rel: Optional[BaseRelation]) -> str:
     if rel is None:
-        return 'null relation'
+        return "null relation"
     else:
         return str(rel)
 
@@ -146,6 +157,7 @@ class BaseAdapter(metaclass=AdapterMeta):
     Macros:
         - get_catalog
     """
+
     Relation: Type[BaseRelation] = BaseRelation
     Column: Type[BaseColumn] = BaseColumn
     ConnectionManager: Type[ConnectionManagerProtocol]
@@ -179,12 +191,12 @@ def commit_if_has_connection(self) -> None:
         self.connections.commit_if_has_connection()
 
     def debug_query(self) -> None:
-        self.execute('select 1 as id')
+        self.execute("select 1 as id")
 
     def nice_connection_name(self) -> str:
         conn = self.connections.get_if_exists()
         if conn is None or conn.name is None:
-            return ''
+            return ""
         return conn.name
 
     @contextmanager
@@ -202,13 +214,11 @@ def connection_named(
                 self.connections.query_header.reset()
 
     @contextmanager
-    def connection_for(
-        self, node: CompileResultNode
-    ) -> Iterator[None]:
+    def connection_for(self, node: CompileResultNode) -> Iterator[None]:
         with self.connection_named(node.unique_id, node):
             yield
 
-    @available.parse(lambda *a, **k: ('', empty_table()))
+    @available.parse(lambda *a, **k: ("", empty_table()))
     def execute(
         self, sql: str, auto_begin: bool = False, fetch: bool = False
     ) -> Tuple[Union[str, AdapterResponse], agate.Table]:
@@ -222,16 +232,10 @@ def execute(
         :return: A tuple of the status and the results (empty if fetch=False).
         :rtype: Tuple[Union[str, AdapterResponse], agate.Table]
         """
-        return self.connections.execute(
-            sql=sql,
-            auto_begin=auto_begin,
-            fetch=fetch
-        )
+        return self.connections.execute(sql=sql, auto_begin=auto_begin, fetch=fetch)
 
-    @available.parse(lambda *a, **k: ('', empty_table()))
-    def get_partitions_metadata(
-        self, table: str
-    ) -> Tuple[agate.Table]:
+    @available.parse(lambda *a, **k: ("", empty_table()))
+    def get_partitions_metadata(self, table: str) -> Tuple[agate.Table]:
         """Obtain partitions metadata for a BigQuery partitioned table.
 
         :param str table_id: a partitioned table id, in standard SQL format.
@@ -239,9 +243,7 @@ def get_partitions_metadata(
             https://cloud.google.com/bigquery/docs/creating-partitioned-tables#getting_partition_metadata_using_meta_tables.
         :rtype: agate.Table
         """
-        return self.connections.get_partitions_metadata(
-            table=table
-        )
+        return self.connections.get_partitions_metadata(table=table)
 
     ###
     # Methods that should never be overridden
@@ -272,11 +274,12 @@ def load_macro_manifest(self) -> MacroManifest:
         if self._macro_manifest_lazy is None:
             # avoid a circular import
             from dbt.parser.manifest import ManifestLoader
-            manifest = ManifestLoader.load_macros(
-                self.config, self.connections.set_query_header
-            )
-            self._macro_manifest_lazy = manifest
-        return self._macro_manifest_lazy
+
+            manifest = ManifestLoader.load_macros(self.config, self.connections.set_query_header)
+            # TODO CT-211
+            self._macro_manifest_lazy = manifest  # type: ignore[assignment]
+        # TODO CT-211
+        return self._macro_manifest_lazy  # type: ignore[return-value]
 
     def clear_macro_manifest(self):
         if self._macro_manifest_lazy is not None:
@@ -290,11 +293,7 @@ def _schema_is_cached(self, database: Optional[str], schema: str) -> bool:
 
         if (database, schema) not in self.cache:
             fire_event(
-                CacheMiss(
-                    conn_name=self.nice_connection_name(),
-                    database=database,
-                    schema=schema
-                )
+                CacheMiss(conn_name=self.nice_connection_name(), database=database, schema=schema)
             )
             return False
         else:
@@ -308,9 +307,7 @@ def _get_cache_schemas(self, manifest: Manifest) -> Set[BaseRelation]:
         return {
             self.Relation.create_from(self.config, node).without_identifier()
             for node in manifest.nodes.values()
-            if (
-                node.is_relational and not node.is_ephemeral_model
-            )
+            if (node.is_relational and not node.is_ephemeral_model)
         }
 
     def _get_catalog_schemas(self, manifest: Manifest) -> SchemaSearchMap:
@@ -324,9 +321,11 @@ def _get_catalog_schemas(self, manifest: Manifest) -> SchemaSearchMap:
         """
         info_schema_name_map = SchemaSearchMap()
         nodes: Iterator[CompileResultNode] = chain(
-            [node for node in manifest.nodes.values() if (
-                node.is_relational and not node.is_ephemeral_model
-            )],
+            [
+                node
+                for node in manifest.nodes.values()
+                if (node.is_relational and not node.is_ephemeral_model)
+            ],
             manifest.sources.values(),
         )
         for node in nodes:
@@ -348,9 +347,9 @@ def _relations_cache_for_schemas(self, manifest: Manifest) -> None:
             for cache_schema in cache_schemas:
                 fut = tpe.submit_connected(
                     self,
-                    f'list_{cache_schema.database}_{cache_schema.schema}',
+                    f"list_{cache_schema.database}_{cache_schema.schema}",
                     self.list_relations_without_caching,
-                    cache_schema
+                    cache_schema,
                 )
                 futures.append(fut)
 
@@ -368,9 +367,7 @@ def _relations_cache_for_schemas(self, manifest: Manifest) -> None:
             cache_update.add((relation.database, relation.schema))
         self.cache.update_schemas(cache_update)
 
-    def set_relations_cache(
-        self, manifest: Manifest, clear: bool = False
-    ) -> None:
+    def set_relations_cache(self, manifest: Manifest, clear: bool = False) -> None:
         """Run a query that gets a populated cache of the relations in the
         database and set the cache on this adapter.
         """
@@ -384,12 +381,10 @@ def cache_added(self, relation: Optional[BaseRelation]) -> str:
         """Cache a new relation in dbt. It will show up in `list relations`."""
         if relation is None:
             name = self.nice_connection_name()
-            raise_compiler_error(
-                'Attempted to cache a null relation for {}'.format(name)
-            )
+            raise_compiler_error("Attempted to cache a null relation for {}".format(name))
         self.cache.add(relation)
         # so jinja doesn't render things
-        return ''
+        return ""
 
     @available
     def cache_dropped(self, relation: Optional[BaseRelation]) -> str:
@@ -398,11 +393,9 @@ def cache_dropped(self, relation: Optional[BaseRelation]) -> str:
         """
         if relation is None:
             name = self.nice_connection_name()
-            raise_compiler_error(
-                'Attempted to drop a null relation for {}'.format(name)
-            )
+            raise_compiler_error("Attempted to drop a null relation for {}".format(name))
         self.cache.drop(relation)
-        return ''
+        return ""
 
     @available
     def cache_renamed(
@@ -418,12 +411,11 @@ def cache_renamed(
             src_name = _relation_name(from_relation)
             dst_name = _relation_name(to_relation)
             raise_compiler_error(
-                'Attempted to rename {} to {} for {}'
-                .format(src_name, dst_name, name)
+                "Attempted to rename {} to {} for {}".format(src_name, dst_name, name)
             )
 
         self.cache.rename(from_relation, to_relation)
-        return ''
+        return ""
 
     ###
     # Abstract methods for database-specific values, attributes, and types
@@ -431,14 +423,11 @@ def cache_renamed(
     @abc.abstractclassmethod
     def date_function(cls) -> str:
         """Get the date function used by this adapter's database."""
-        raise NotImplementedException(
-            '`date_function` is not implemented for this adapter!')
+        raise NotImplementedException("`date_function` is not implemented for this adapter!")
 
     @abc.abstractclassmethod
     def is_cancelable(cls) -> bool:
-        raise NotImplementedException(
-            '`is_cancelable` is not implemented for this adapter!'
-        )
+        raise NotImplementedException("`is_cancelable` is not implemented for this adapter!")
 
     ###
     # Abstract methods about schemas
@@ -446,9 +435,7 @@ def is_cancelable(cls) -> bool:
     @abc.abstractmethod
     def list_schemas(self, database: str) -> List[str]:
         """Get a list of existing schemas in database"""
-        raise NotImplementedException(
-            '`list_schemas` is not implemented for this adapter!'
-        )
+        raise NotImplementedException("`list_schemas` is not implemented for this adapter!")
 
     @available.parse(lambda *a, **k: False)
     def check_schema_exists(self, database: str, schema: str) -> bool:
@@ -458,10 +445,7 @@ def check_schema_exists(self, database: str, schema: str) -> bool:
         and adapters should implement it if there is an optimized path (and
         there probably is)
         """
-        search = (
-            s.lower() for s in
-            self.list_schemas(database=database)
-        )
+        search = (s.lower() for s in self.list_schemas(database=database))
         return schema.lower() in search
 
     ###
@@ -474,58 +458,44 @@ def drop_relation(self, relation: BaseRelation) -> None:
 
         *Implementors must call self.cache.drop() to preserve cache state!*
         """
-        raise NotImplementedException(
-            '`drop_relation` is not implemented for this adapter!'
-        )
+        raise NotImplementedException("`drop_relation` is not implemented for this adapter!")
 
     @abc.abstractmethod
     @available.parse_none
     def truncate_relation(self, relation: BaseRelation) -> None:
         """Truncate the given relation."""
-        raise NotImplementedException(
-            '`truncate_relation` is not implemented for this adapter!'
-        )
+        raise NotImplementedException("`truncate_relation` is not implemented for this adapter!")
 
     @abc.abstractmethod
     @available.parse_none
-    def rename_relation(
-        self, from_relation: BaseRelation, to_relation: BaseRelation
-    ) -> None:
+    def rename_relation(self, from_relation: BaseRelation, to_relation: BaseRelation) -> None:
         """Rename the relation from from_relation to to_relation.
 
         Implementors must call self.cache.rename() to preserve cache state.
         """
-        raise NotImplementedException(
-            '`rename_relation` is not implemented for this adapter!'
-        )
+        raise NotImplementedException("`rename_relation` is not implemented for this adapter!")
 
     @abc.abstractmethod
     @available.parse_list
-    def get_columns_in_relation(
-        self, relation: BaseRelation
-    ) -> List[BaseColumn]:
-        """Get a list of the columns in the given Relation. """
+    def get_columns_in_relation(self, relation: BaseRelation) -> List[BaseColumn]:
+        """Get a list of the columns in the given Relation."""
         raise NotImplementedException(
-            '`get_columns_in_relation` is not implemented for this adapter!'
+            "`get_columns_in_relation` is not implemented for this adapter!"
         )
 
-    @available.deprecated('get_columns_in_relation', lambda *a, **k: [])
-    def get_columns_in_table(
-        self, schema: str, identifier: str
-    ) -> List[BaseColumn]:
+    @available.deprecated("get_columns_in_relation", lambda *a, **k: [])
+    def get_columns_in_table(self, schema: str, identifier: str) -> List[BaseColumn]:
         """DEPRECATED: Get a list of the columns in the given table."""
         relation = self.Relation.create(
             database=self.config.credentials.database,
             schema=schema,
             identifier=identifier,
-            quote_policy=self.config.quoting
+            quote_policy=self.config.quoting,
         )
         return self.get_columns_in_relation(relation)
 
     @abc.abstractmethod
-    def expand_column_types(
-        self, goal: BaseRelation, current: BaseRelation
-    ) -> None:
+    def expand_column_types(self, goal: BaseRelation, current: BaseRelation) -> None:
         """Expand the current table's types to match the goal table. (passable)
 
         :param self.Relation goal: A relation that currently exists in the
@@ -534,13 +504,11 @@ def expand_column_types(
             database with columns of unspecified types.
         """
         raise NotImplementedException(
-            '`expand_target_column_types` is not implemented for this adapter!'
+            "`expand_target_column_types` is not implemented for this adapter!"
         )
 
     @abc.abstractmethod
-    def list_relations_without_caching(
-        self, schema_relation: BaseRelation
-    ) -> List[BaseRelation]:
+    def list_relations_without_caching(self, schema_relation: BaseRelation) -> List[BaseRelation]:
         """List relations in the given schema, bypassing the cache.
 
         This is used as the underlying behavior to fill the cache.
@@ -551,8 +519,7 @@ def list_relations_without_caching(
         :rtype: List[self.Relation]
         """
         raise NotImplementedException(
-            '`list_relations_without_caching` is not implemented for this '
-            'adapter!'
+            "`list_relations_without_caching` is not implemented for this " "adapter!"
         )
 
     ###
@@ -567,34 +534,27 @@ def get_missing_columns(
         """
         if not isinstance(from_relation, self.Relation):
             invalid_type_error(
-                method_name='get_missing_columns',
-                arg_name='from_relation',
+                method_name="get_missing_columns",
+                arg_name="from_relation",
                 got_value=from_relation,
-                expected_type=self.Relation)
+                expected_type=self.Relation,
+            )
 
         if not isinstance(to_relation, self.Relation):
             invalid_type_error(
-                method_name='get_missing_columns',
-                arg_name='to_relation',
+                method_name="get_missing_columns",
+                arg_name="to_relation",
                 got_value=to_relation,
-                expected_type=self.Relation)
+                expected_type=self.Relation,
+            )
 
-        from_columns = {
-            col.name: col for col in
-            self.get_columns_in_relation(from_relation)
-        }
+        from_columns = {col.name: col for col in self.get_columns_in_relation(from_relation)}
 
-        to_columns = {
-            col.name: col for col in
-            self.get_columns_in_relation(to_relation)
-        }
+        to_columns = {col.name: col for col in self.get_columns_in_relation(to_relation)}
 
         missing_columns = set(from_columns.keys()) - set(to_columns.keys())
 
-        return [
-            col for (col_name, col) in from_columns.items()
-            if col_name in missing_columns
-        ]
+        return [col for (col_name, col) in from_columns.items() if col_name in missing_columns]
 
     @available.parse_none
     def valid_snapshot_target(self, relation: BaseRelation) -> None:
@@ -607,18 +567,19 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None:
         """
         if not isinstance(relation, self.Relation):
             invalid_type_error(
-                method_name='valid_snapshot_target',
-                arg_name='relation',
+                method_name="valid_snapshot_target",
+                arg_name="relation",
                 got_value=relation,
-                expected_type=self.Relation)
+                expected_type=self.Relation,
+            )
 
         columns = self.get_columns_in_relation(relation)
         names = set(c.name.lower() for c in columns)
-        expanded_keys = ('scd_id', 'valid_from', 'valid_to')
+        expanded_keys = ("scd_id", "valid_from", "valid_to")
         extra = []
         missing = []
         for legacy in expanded_keys:
-            desired = 'dbt_' + legacy
+            desired = "dbt_" + legacy
             if desired not in names:
                 missing.append(desired)
                 if legacy in names:
@@ -628,13 +589,13 @@ def valid_snapshot_target(self, relation: BaseRelation) -> None:
             if extra:
                 msg = (
                     'Snapshot target has ("{}") but not ("{}") - is it an '
-                    'unmigrated previous version archive?'
-                    .format('", "'.join(extra), '", "'.join(missing))
+                    "unmigrated previous version archive?".format(
+                        '", "'.join(extra), '", "'.join(missing)
+                    )
                 )
             else:
-                msg = (
-                    'Snapshot target is not a snapshot table (missing "{}")'
-                    .format('", "'.join(missing))
+                msg = 'Snapshot target is not a snapshot table (missing "{}")'.format(
+                    '", "'.join(missing)
                 )
             raise_compiler_error(msg)
 
@@ -644,64 +605,59 @@ def expand_target_column_types(
     ) -> None:
         if not isinstance(from_relation, self.Relation):
             invalid_type_error(
-                method_name='expand_target_column_types',
-                arg_name='from_relation',
+                method_name="expand_target_column_types",
+                arg_name="from_relation",
                 got_value=from_relation,
-                expected_type=self.Relation)
+                expected_type=self.Relation,
+            )
 
         if not isinstance(to_relation, self.Relation):
             invalid_type_error(
-                method_name='expand_target_column_types',
-                arg_name='to_relation',
+                method_name="expand_target_column_types",
+                arg_name="to_relation",
                 got_value=to_relation,
-                expected_type=self.Relation)
+                expected_type=self.Relation,
+            )
 
         self.expand_column_types(from_relation, to_relation)
 
-    def list_relations(
-        self, database: Optional[str], schema: str
-    ) -> List[BaseRelation]:
+    def list_relations(self, database: Optional[str], schema: str) -> List[BaseRelation]:
         if self._schema_is_cached(database, schema):
             return self.cache.get_relations(database, schema)
 
         schema_relation = self.Relation.create(
-            database=database,
-            schema=schema,
-            identifier='',
-            quote_policy=self.config.quoting
+            database=database, schema=schema, identifier="", quote_policy=self.config.quoting
         ).without_identifier()
 
         # we can't build the relations cache because we don't have a
         # manifest, so we can't run any operations.
-        relations = self.list_relations_without_caching(
-            schema_relation
+        relations = self.list_relations_without_caching(schema_relation)
+        fire_event(
+            ListRelations(
+                database=database, schema=schema, relations=[_make_key(x) for x in relations]
+            )
         )
-        fire_event(ListRelations(
-            database=database,
-            schema=schema,
-            relations=[_make_key(x) for x in relations]
-        ))
 
         return relations
 
-    def _make_match_kwargs(
-        self, database: str, schema: str, identifier: str
-    ) -> Dict[str, str]:
+    def _make_match_kwargs(self, database: str, schema: str, identifier: str) -> Dict[str, str]:
         quoting = self.config.quoting
-        if identifier is not None and quoting['identifier'] is False:
+        if identifier is not None and quoting["identifier"] is False:
             identifier = identifier.lower()
 
-        if schema is not None and quoting['schema'] is False:
+        if schema is not None and quoting["schema"] is False:
             schema = schema.lower()
 
-        if database is not None and quoting['database'] is False:
+        if database is not None and quoting["database"] is False:
             database = database.lower()
 
-        return filter_null_values({
-            'database': database,
-            'identifier': identifier,
-            'schema': schema,
-        })
+        return filter_null_values(
+            {
+                "database": database,
+                "identifier": identifier,
+                "schema": schema,
+            }
+        )
 
     def _make_match(
         self,
@@ -722,30 +678,25 @@ def _make_match(
         return matches
 
     @available.parse_none
-    def get_relation(
-        self, database: str, schema: str, identifier: str
-    ) -> Optional[BaseRelation]:
+    def get_relation(self, database: str, schema: str, identifier: str) -> Optional[BaseRelation]:
         relations_list = self.list_relations(database, schema)
 
-        matches = self._make_match(relations_list, database, schema,
-                                   identifier)
+        matches = self._make_match(relations_list, database, schema, identifier)
 
         if len(matches) > 1:
             kwargs = {
-                'identifier': identifier,
-                'schema': schema,
-                'database': database,
+                "identifier": identifier,
+                "schema": schema,
+                "database": database,
             }
-            get_relation_returned_multiple_results(
-                kwargs, matches
-            )
+            get_relation_returned_multiple_results(kwargs, matches)
 
         elif matches:
             return matches[0]
 
         return None
 
-    @available.deprecated('get_relation', lambda *a, **k: False)
+    @available.deprecated("get_relation", lambda *a, **k: False)
     def already_exists(self, schema: str, name: str) -> bool:
         """DEPRECATED: Return if a model already exists in the database"""
         database = self.config.credentials.database
@@ -760,25 +711,19 @@ def already_exists(self, schema: str, name: str) -> bool:
     @available.parse_none
     def create_schema(self, relation: BaseRelation):
         """Create the given schema if it does not exist."""
-        raise NotImplementedException(
-            '`create_schema` is not implemented for this adapter!'
-        )
+        raise NotImplementedException("`create_schema` is not implemented for this adapter!")
 
     @abc.abstractmethod
     @available.parse_none
     def drop_schema(self, relation: BaseRelation):
         """Drop the given schema (and everything in it) if it exists."""
-        raise NotImplementedException(
-            '`drop_schema` is not implemented for this adapter!'
-        )
+        raise NotImplementedException("`drop_schema` is not implemented for this adapter!")
 
     @available
     @abc.abstractclassmethod
     def quote(cls, identifier: str) -> str:
         """Quote the given identifier, as appropriate for the database."""
-        raise NotImplementedException(
-            '`quote` is not implemented for this adapter!'
-        )
+        raise NotImplementedException("`quote` is not implemented for this adapter!")
 
     @available
     def quote_as_configured(self, identifier: str, quote_key: str) -> str:
@@ -800,9 +745,7 @@ def quote_as_configured(self, identifier: str, quote_key: str) -> str:
             return identifier
 
     @available
-    def quote_seed_column(
-        self, column: str, quote_config: Optional[bool]
-    ) -> str:
+    def quote_seed_column(self, column: str, quote_config: Optional[bool]) -> str:
         quote_columns: bool = True
         if isinstance(quote_config, bool):
             quote_columns = quote_config
@@ -811,7 +754,7 @@ def quote_seed_column(
         else:
             raise_compiler_error(
                 f'The seed configuration value of "quote_columns" has an '
-                f'invalid type {type(quote_config)}'
+                f"invalid type {type(quote_config)}"
             )
 
         if quote_columns:
@@ -824,9 +767,7 @@ def quote_seed_column(
     # converting agate types into their sql equivalents.
     ###
     @abc.abstractclassmethod
-    def convert_text_type(
-        cls, agate_table: agate.Table, col_idx: int
-    ) -> str:
+    def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
         """Return the type in the database that best maps to the agate.Text
         type for the given agate table and column index.
 
@@ -834,13 +775,10 @@ def convert_text_type(
         :param col_idx: The index into the agate table for the column.
         :return: The name of the type in the database
         """
-        raise NotImplementedException(
-            '`convert_text_type` is not implemented for this adapter!')
+        raise NotImplementedException("`convert_text_type` is not implemented for this adapter!")
 
     @abc.abstractclassmethod
-    def convert_number_type(
-        cls, agate_table: agate.Table, col_idx: int
-    ) -> str:
+    def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
         """Return the type in the database that best maps to the agate.Number
         type for the given agate table and column index.
 
@@ -848,13 +786,10 @@ def convert_number_type(
         :param col_idx: The index into the agate table for the column.
         :return: The name of the type in the database
         """
-        raise NotImplementedException(
-            '`convert_number_type` is not implemented for this adapter!')
+        raise NotImplementedException("`convert_number_type` is not implemented for this adapter!")
 
     @abc.abstractclassmethod
-    def convert_boolean_type(
-        cls, agate_table: agate.Table, col_idx: int
-    ) -> str:
+    def convert_boolean_type(cls, agate_table: agate.Table, col_idx: int) -> str:
         """Return the type in the database that best maps to the agate.Boolean
         type for the given agate table and column index.
 
@@ -863,12 +798,11 @@ def convert_boolean_type(
         :return: The name of the type in the database
         """
         raise NotImplementedException(
-            '`convert_boolean_type` is not implemented for this adapter!')
+            "`convert_boolean_type` is not implemented for this adapter!"
+        )
 
     @abc.abstractclassmethod
-    def convert_datetime_type(
-        cls, agate_table: agate.Table, col_idx: int
-    ) -> str:
+    def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str:
         """Return the type in the database that best maps to the agate.DateTime
         type for the given agate table and column index.
 
@@ -877,7 +811,8 @@ def convert_datetime_type(
         :return: The name of the type in the database
         """
         raise NotImplementedException(
-            '`convert_datetime_type` is not implemented for this adapter!')
+            "`convert_datetime_type` is not implemented for this adapter!"
+        )
 
     @abc.abstractclassmethod
     def convert_date_type(cls, agate_table: agate.Table, col_idx: int) -> str:
@@ -888,8 +823,7 @@ def convert_date_type(cls, agate_table: agate.Table, col_idx: int) -> str:
         :param col_idx: The index into the agate table for the column.
         :return: The name of the type in the database
         """
-        raise NotImplementedException(
-            '`convert_date_type` is not implemented for this adapter!')
+        raise NotImplementedException("`convert_date_type` is not implemented for this adapter!")
 
     @abc.abstractclassmethod
     def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str:
@@ -900,20 +834,15 @@ def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str:
         :param col_idx: The index into the agate table for the column.
         :return: The name of the type in the database
         """
-        raise NotImplementedException(
-            '`convert_time_type` is not implemented for this adapter!')
+        raise NotImplementedException("`convert_time_type` is not implemented for this adapter!")
 
     @available
     @classmethod
-    def convert_type(
-        cls, agate_table: agate.Table, col_idx: int
-    ) -> Optional[str]:
+    def convert_type(cls, agate_table: agate.Table, col_idx: int) -> Optional[str]:
         return cls.convert_agate_type(agate_table, col_idx)
 
     @classmethod
-    def convert_agate_type(
-        cls, agate_table: agate.Table, col_idx: int
-    ) -> Optional[str]:
+    def convert_agate_type(cls, agate_table: agate.Table, col_idx: int) -> Optional[str]:
         agate_type: Type = agate_table.column_types[col_idx]
         conversions: List[Tuple[Type, Callable[..., str]]] = [
             (agate.Text, cls.convert_text_type),
@@ -960,42 +889,44 @@ def execute_macro(
             context_override = {}
 
         if manifest is None:
-            manifest = self._macro_manifest
-
-        macro = manifest.find_macro_by_name(
+            # TODO CT-211
+            manifest = self._macro_manifest  # type: ignore[assignment]
+        # TODO CT-211
+        macro = manifest.find_macro_by_name(  # type: ignore[union-attr]
             macro_name, self.config.project_name, project
         )
         if macro is None:
             if project is None:
-                package_name = 'any package'
+                package_name = "any package"
             else:
                 package_name = 'the "{}" package'.format(project)
 
             raise RuntimeException(
-                'dbt could not find a macro with the name "{}" in {}'
-                .format(macro_name, package_name)
+                'dbt could not find a macro with the name "{}" in {}'.format(
+                    macro_name, package_name
+                )
             )
         # This causes a reference cycle, as generate_runtime_macro_context()
         # ends up calling get_adapter, so the import has to be here.
         from dbt.context.providers import generate_runtime_macro_context
+
         macro_context = generate_runtime_macro_context(
+            # TODO CT-211
             macro=macro,
             config=self.config,
-            manifest=manifest,
-            package_name=project
+            manifest=manifest,  # type: ignore[arg-type]
+            package_name=project,
         )
         macro_context.update(context_override)
 
         macro_function = MacroGenerator(macro, macro_context)
 
-        with self.connections.exception_handler(f'macro {macro_name}'):
+        with self.connections.exception_handler(f"macro {macro_name}"):
             result = macro_function(**kwargs)
         return result
 
     @classmethod
-    def _catalog_filter_table(
-        cls, table: agate.Table, manifest: Manifest
-    ) -> agate.Table:
+    def _catalog_filter_table(cls, table: agate.Table, manifest: Manifest) -> agate.Table:
         """Filter the table as appropriate for catalog entries. Subclasses can
         override this to change filtering rules on a per-adapter basis.
         """
@@ -1003,7 +934,7 @@ def _catalog_filter_table(
         table = table_from_rows(
             table.rows,
             table.column_names,
-            text_only_columns=['table_database', 'table_schema', 'table_name']
+            text_only_columns=["table_database", "table_schema", "table_name"],
         )
         return table.where(_catalog_filter_schemas(manifest))
 
@@ -1014,10 +945,7 @@ def _get_one_catalog(
         manifest: Manifest,
     ) -> agate.Table:
 
-        kwargs = {
-            'information_schema': information_schema,
-            'schemas': schemas
-        }
+        kwargs = {"information_schema": information_schema, "schemas": schemas}
         table = self.execute_macro(
             GET_CATALOG_MACRO_NAME,
             kwargs=kwargs,
@@ -1029,9 +957,7 @@ def _get_one_catalog(
         results = self._catalog_filter_table(table, manifest)
         return results
 
-    def get_catalog(
-        self, manifest: Manifest
-    ) -> Tuple[agate.Table, List[Exception]]:
+    def get_catalog(self, manifest: Manifest) -> Tuple[agate.Table, List[Exception]]:
         schema_map = self._get_catalog_schemas(manifest)
 
         with executor(self.config) as tpe:
@@ -1039,14 +965,10 @@ def get_catalog(
             for info, schemas in schema_map.items():
                 if len(schemas) == 0:
                     continue
-                name = '.'.join([
-                    str(info.database),
-                    'information_schema'
-                ])
+                name = ".".join([str(info.database), "information_schema"])
 
                 fut = tpe.submit_connected(
-                    self, name,
-                    self._get_one_catalog, info, schemas, manifest
+                    self, name, self._get_one_catalog, info, schemas, manifest
                 )
                 futures.append(fut)
 
@@ -1063,21 +985,17 @@ def calculate_freshness(
         source: BaseRelation,
         loaded_at_field: str,
         filter: Optional[str],
-        manifest: Optional[Manifest] = None
+        manifest: Optional[Manifest] = None,
     ) -> Dict[str, Any]:
         """Calculate the freshness of sources in dbt, and return it"""
         kwargs: Dict[str, Any] = {
-            'source': source,
-            'loaded_at_field': loaded_at_field,
-            'filter': filter,
+            "source": source,
+            "loaded_at_field": loaded_at_field,
+            "filter": filter,
         }
 
         # run the macro
-        table = self.execute_macro(
-            FRESHNESS_MACRO_NAME,
-            kwargs=kwargs,
-            manifest=manifest
-        )
+        table = self.execute_macro(FRESHNESS_MACRO_NAME, kwargs=kwargs, manifest=manifest)
         # now we have a 1-row table of the maximum `loaded_at_field` value and
         # the current time according to the db.
         if len(table) != 1 or len(table[0]) != 2:
@@ -1096,9 +1014,9 @@ def calculate_freshness(
         snapshotted_at = _utc(table[0][1], source, loaded_at_field)
         age = (snapshotted_at - max_loaded_at).total_seconds()
         return {
-            'max_loaded_at': max_loaded_at,
-            'snapshotted_at': snapshotted_at,
-            'age': age,
+            "max_loaded_at": max_loaded_at,
+            "snapshotted_at": snapshotted_at,
+            "age": age,
         }
 
     def pre_model_hook(self, config: Mapping[str, Any]) -> Any:
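
The `age` computed in `calculate_freshness` above is plain datetime arithmetic; a small runnable sketch:

```python
from datetime import datetime, timezone

max_loaded_at = datetime(2021, 1, 1, 12, 0, tzinfo=timezone.utc)
snapshotted_at = datetime(2021, 1, 1, 13, 30, tzinfo=timezone.utc)

# seconds between the warehouse clock and the newest loaded_at value
age = (snapshotted_at - max_loaded_at).total_seconds()
assert age == 5400.0
```
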
@@ -1128,6 +1046,7 @@ def post_model_hook(self, config: Mapping[str, Any], context: Any) -> None:
 
     def get_compiler(self):
         from dbt.compilation import Compiler
+
         return Compiler(self.config)
 
     # Methods used in adapter tests
@@ -1138,14 +1057,12 @@ def update_column_sql(
         clause: str,
         where_clause: Optional[str] = None,
     ) -> str:
-        clause = f'update {dst_name} set {dst_column} = {clause}'
+        clause = f"update {dst_name} set {dst_column} = {clause}"
         if where_clause is not None:
-            clause += f' where {where_clause}'
+            clause += f" where {where_clause}"
         return clause
 
-    def timestamp_add_sql(
-        self, add_to: str, number: int = 1, interval: str = 'hour'
-    ) -> str:
+    def timestamp_add_sql(self, add_to: str, number: int = 1, interval: str = "hour") -> str:
         # for backwards compatibility, we're compelled to set some sort of
         # default. A lot of searching has led me to believe that the
         # '+ interval' syntax used in postgres/redshift is relatively common
@@ -1153,23 +1070,24 @@ def timestamp_add_sql(
         return f"{add_to} + interval '{number} {interval}'"
 
     def string_add_sql(
-        self, add_to: str, value: str, location='append',
+        self,
+        add_to: str,
+        value: str,
+        location="append",
     ) -> str:
-        if location == 'append':
+        if location == "append":
             return f"{add_to} || '{value}'"
-        elif location == 'prepend':
+        elif location == "prepend":
             return f"'{value}' || {add_to}"
         else:
-            raise RuntimeException(
-                f'Got an unexpected location value of "{location}"'
-            )
+            raise RuntimeException(f'Got an unexpected location value of "{location}"')
 
     def get_rows_different_sql(
         self,
         relation_a: BaseRelation,
         relation_b: BaseRelation,
         column_names: Optional[List[str]] = None,
-        except_operator: str = 'EXCEPT',
+        except_operator: str = "EXCEPT",
     ) -> str:
         """Generate SQL for a query that returns a single row with a two
         columns: the number of rows that are different between the two
@@ -1182,7 +1100,7 @@ def get_rows_different_sql(
             names = sorted((self.quote(c.name) for c in columns))
         else:
             names = sorted((self.quote(n) for n in column_names))
-        columns_csv = ', '.join(names)
+        columns_csv = ", ".join(names)
 
         sql = COLUMNS_EQUAL_SQL.format(
             columns=columns_csv,
@@ -1194,7 +1112,7 @@ def get_rows_different_sql(
         return sql
 
 
-COLUMNS_EQUAL_SQL = '''
+COLUMNS_EQUAL_SQL = """
 with diff_count as (
     SELECT
         1 as id,
@@ -1220,11 +1138,11 @@ def get_rows_different_sql(
     diff_count.num_missing as num_mismatched
 from row_count_diff
 join diff_count using (id)
-'''.strip()
+""".strip()
 
 
 def catch_as_completed(
-    futures  # typing: List[Future[agate.Table]]
+    futures,  # typing: List[Future[agate.Table]]
 ) -> Tuple[agate.Table, List[Exception]]:
 
     # catalogs: agate.Table = agate.Table(rows=[])
@@ -1237,15 +1155,10 @@ def catch_as_completed(
         if exc is None:
             catalog = future.result()
             tables.append(catalog)
-        elif (
-            isinstance(exc, KeyboardInterrupt) or
-            not isinstance(exc, Exception)
-        ):
+        elif isinstance(exc, KeyboardInterrupt) or not isinstance(exc, Exception):
             raise exc
         else:
-            warn_or_error(
-                f'Encountered an error while generating catalog: {str(exc)}'
-            )
+            warn_or_error(f"Encountered an error while generating catalog: {str(exc)}")
             # exc is not None, derives from Exception, and isn't ctrl+c
             exceptions.append(exc)
     return merge_tables(tables), exceptions
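
Several hunks above collapse the small SQL string helpers onto single lines. A standalone sketch of what the default implementations render, with the bodies copied from the diff and dbt's `RuntimeException` swapped for a builtin so the snippet is self-contained:

```python
def timestamp_add_sql(add_to: str, number: int = 1, interval: str = "hour") -> str:
    # postgres/redshift-style "+ interval" syntax, per the comment in the diff
    return f"{add_to} + interval '{number} {interval}'"


def string_add_sql(add_to: str, value: str, location: str = "append") -> str:
    if location == "append":
        return f"{add_to} || '{value}'"
    elif location == "prepend":
        return f"'{value}' || {add_to}"
    raise ValueError(f'Got an unexpected location value of "{location}"')


assert timestamp_add_sql("loaded_at", 4) == "loaded_at + interval '4 hour'"
assert string_add_sql("name", "_tmp") == "name || '_tmp'"
assert string_add_sql("name", "pre_", location="prepend") == "'pre_' || name"
```
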
diff --git a/core/dbt/adapters/base/meta.py b/core/dbt/adapters/base/meta.py
index 209240c0de7..de35a4f826a 100644
--- a/core/dbt/adapters/base/meta.py
+++ b/core/dbt/adapters/base/meta.py
@@ -30,9 +30,11 @@ def my_other_method(self, a, b):
             x.update(big_expensive_db_query())
             return x
         """
+
         def inner(func):
             func._parse_replacement_ = parse_replacement
             return self(func)
+
         return inner
 
     def deprecated(
@@ -57,13 +59,14 @@ def my_old_slow_method(self, arg):
         The optional parse_replacement, if provided, supplies a parse-time
         replacement for the actual method (see `available.parse`).
         """
+
         def wrapper(func):
             func_name = func.__name__
             renamed_method(func_name, supported_name)
 
             @wraps(func)
             def inner(*args, **kwargs):
-                warn('adapter:{}'.format(func_name))
+                warn("adapter:{}".format(func_name))
                 return func(*args, **kwargs)
 
             if parse_replacement:
@@ -71,6 +74,7 @@ def inner(*args, **kwargs):
             else:
                 available_function = self
             return available_function(inner)
+
         return wrapper
 
     def parse_none(self, func: Callable) -> Callable:
@@ -95,9 +99,7 @@ def __new__(mcls, name, bases, namespace, **kwargs):
         # I'm not sure there is any benefit to it after poking around a bit,
         # but having it doesn't hurt on the python side (and omitting it could
         # hurt for obscure metaclass reasons, for all I know)
-        cls = abc.ABCMeta.__new__(  # type: ignore
-            mcls, name, bases, namespace, **kwargs
-        )
+        cls = abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs)  # type: ignore
 
         # this is very much inspired by ABCMeta's own implementation
 
@@ -109,14 +111,14 @@ def __new__(mcls, name, bases, namespace, **kwargs):
 
         # collect base class data first
         for base in bases:
-            available.update(getattr(base, '_available_', set()))
-            replacements.update(getattr(base, '_parse_replacements_', set()))
+            available.update(getattr(base, "_available_", set()))
+            replacements.update(getattr(base, "_parse_replacements_", set()))
 
         # override with local data if it exists
         for name, value in namespace.items():
-            if getattr(value, '_is_available_', False):
+            if getattr(value, "_is_available_", False):
                 available.add(name)
-            parse_replacement = getattr(value, '_parse_replacement_', None)
+            parse_replacement = getattr(value, "_parse_replacement_", None)
             if parse_replacement is not None:
                 replacements[name] = parse_replacement
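
A condensed, runnable sketch of the collection pattern in the metaclass above: sets from base classes are merged first, then overridden by names flagged in the local namespace (the class names here are illustrative):

```python
import abc


class CollectingMeta(abc.ABCMeta):
    def __new__(mcls, name, bases, namespace, **kwargs):
        cls = super().__new__(mcls, name, bases, namespace, **kwargs)
        available = set()
        # collect base class data first
        for base in bases:
            available.update(getattr(base, "_available_", set()))
        # override with local data if it exists
        for attr_name, value in namespace.items():
            if getattr(value, "_is_available_", False):
                available.add(attr_name)
        cls._available_ = frozenset(available)
        return cls


class Base(metaclass=CollectingMeta):
    def visible(self):
        ...

    visible._is_available_ = True


assert Base._available_ == frozenset({"visible"})
```
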
 
diff --git a/core/dbt/adapters/base/plugin.py b/core/dbt/adapters/base/plugin.py
index c87b2a26a91..f0d348d8f57 100644
--- a/core/dbt/adapters/base/plugin.py
+++ b/core/dbt/adapters/base/plugin.py
@@ -8,11 +8,10 @@
 def project_name_from_path(include_path: str) -> str:
     # avoid an import cycle
     from dbt.config.project import Project
+
     partial = Project.partial_load(include_path)
     if partial.project_name is None:
-        raise CompilationException(
-            f'Invalid project at {include_path}: name not set!'
-        )
+        raise CompilationException(f"Invalid project at {include_path}: name not set!")
     return partial.project_name
 
 
@@ -23,12 +22,13 @@ class AdapterPlugin:
     :param dependencies: A list of adapter names that this adapter depends
         upon.
     """
+
     def __init__(
         self,
         adapter: Type[AdapterProtocol],
         credentials: Type[Credentials],
         include_path: str,
-        dependencies: Optional[List[str]] = None
+        dependencies: Optional[List[str]] = None,
     ):
 
         self.adapter: Type[AdapterProtocol] = adapter
diff --git a/core/dbt/adapters/base/query_headers.py b/core/dbt/adapters/base/query_headers.py
index 49c564ffbe4..26f34be9c93 100644
--- a/core/dbt/adapters/base/query_headers.py
+++ b/core/dbt/adapters/base/query_headers.py
@@ -15,7 +15,7 @@ def __init__(self, node):
         self._inner_node = node
 
     def __getattr__(self, name):
-        return getattr(self._inner_node, name, '')
+        return getattr(self._inner_node, name, "")
 
 
 class _QueryComment(local):
@@ -24,6 +24,7 @@ class _QueryComment(local):
         - the current thread's query comment.
         - a source_name indicating what set the current thread's query comment
     """
+
     def __init__(self, initial):
         self.query_comment: Optional[str] = initial
         self.append = False
@@ -35,21 +36,19 @@ def add(self, sql: str) -> str:
         if self.append:
             # move any trailing ';' to after the comment
             sql = sql.rstrip()
-            if sql[-1] == ';':
+            if sql[-1] == ";":
                 sql = sql[:-1]
-                return '{}\n/* {} */;'.format(sql, self.query_comment.strip())
+                return "{}\n/* {} */;".format(sql, self.query_comment.strip())
 
-            return '{}\n/* {} */'.format(sql, self.query_comment.strip())
+            return "{}\n/* {} */".format(sql, self.query_comment.strip())
 
-        return '/* {} */\n{}'.format(self.query_comment.strip(), sql)
+        return "/* {} */\n{}".format(self.query_comment.strip(), sql)
 
     def set(self, comment: Optional[str], append: bool):
-        if isinstance(comment, str) and '*/' in comment:
+        if isinstance(comment, str) and "*/" in comment:
             # tell the user "no" so they don't hurt themselves by writing
             # garbage
-            raise RuntimeException(
-                f'query comment contains illegal value "*/": {comment}'
-            )
+            raise RuntimeException(f'query comment contains illegal value "*/": {comment}')
         self.query_comment = comment
         self.append = append
 
@@ -63,15 +62,17 @@ def __init__(self, config: AdapterRequiredConfig, manifest: Manifest):
         self.config = config
 
         comment_macro = self._get_comment_macro()
-        self.generator: QueryStringFunc = lambda name, model: ''
+        self.generator: QueryStringFunc = lambda name, model: ""
         # if the comment value was None or the empty string, just skip it
         if comment_macro:
             assert isinstance(comment_macro, str)
-            macro = '\n'.join((
-                '{%- macro query_comment_macro(connection_name, node) -%}',
-                comment_macro,
-                '{% endmacro %}'
-            ))
+            macro = "\n".join(
+                (
+                    "{%- macro query_comment_macro(connection_name, node) -%}",
+                    comment_macro,
+                    "{% endmacro %}",
+                )
+            )
             ctx = self._get_context()
             self.generator = QueryStringGenerator(macro, ctx)
         self.comment = _QueryComment(None)
@@ -87,7 +88,7 @@ def add(self, sql: str) -> str:
         return self.comment.add(sql)
 
     def reset(self):
-        self.set('master', None)
+        self.set("master", None)
 
     def set(self, name: str, node: Optional[CompileResultNode]):
         wrapped: Optional[NodeWrapper] = None
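
A standalone sketch of the comment-placement rules in `_QueryComment.add` above: comments are prepended by default, and in append mode a trailing semicolon is kept after the comment:

```python
def add_comment(sql: str, comment: str, append: bool = False) -> str:
    if append:
        sql = sql.rstrip()
        if sql[-1] == ";":
            # keep the trailing ';' after the comment
            return "{}\n/* {} */;".format(sql[:-1], comment.strip())
        return "{}\n/* {} */".format(sql, comment.strip())
    return "/* {} */\n{}".format(comment.strip(), sql)


assert add_comment("select 1", "run by dbt") == "/* run by dbt */\nselect 1"
assert add_comment("select 1;", "run by dbt", append=True) == "select 1\n/* run by dbt */;"
```
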
diff --git a/core/dbt/adapters/base/relation.py b/core/dbt/adapters/base/relation.py
index 672348f4285..3124384975a 100644
--- a/core/dbt/adapters/base/relation.py
+++ b/core/dbt/adapters/base/relation.py
@@ -1,13 +1,16 @@
 from collections.abc import Hashable
 from dataclasses import dataclass
-from typing import (
-    Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set
-)
+from typing import Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set
 
 from dbt.contracts.graph.compiled import CompiledNode
 from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedNode
 from dbt.contracts.relation import (
-    RelationType, ComponentName, HasQuoting, FakeAPIObject, Policy, Path
+    RelationType,
+    ComponentName,
+    HasQuoting,
+    FakeAPIObject,
+    Policy,
+    Path,
 )
 from dbt.exceptions import InternalException
 from dbt.node_types import NodeType
@@ -16,7 +19,7 @@
 import dbt.exceptions
 
 
-Self = TypeVar('Self', bound='BaseRelation')
+Self = TypeVar("Self", bound="BaseRelation")
 
 
 @dataclass(frozen=True, eq=False, repr=False)
@@ -40,7 +43,7 @@ def _get_field_named(cls, field_name):
             if field.name == field_name:
                 return field
         # this should be unreachable
-        raise ValueError(f'BaseRelation has no {field_name} field!')
+        raise ValueError(f"BaseRelation has no {field_name} field!")
 
     def __eq__(self, other):
         if not isinstance(other, self.__class__):
@@ -49,20 +52,18 @@ def __eq__(self, other):
 
     @classmethod
     def get_default_quote_policy(cls) -> Policy:
-        return cls._get_field_named('quote_policy').default
+        return cls._get_field_named("quote_policy").default
 
     @classmethod
     def get_default_include_policy(cls) -> Policy:
-        return cls._get_field_named('include_policy').default
+        return cls._get_field_named("include_policy").default
 
     def get(self, key, default=None):
         """Override `.get` to return a metadata object so we don't break
         dbt_utils.
         """
-        if key == 'metadata':
-            return {
-                'type': self.__class__.__name__
-            }
+        if key == "metadata":
+            return {"type": self.__class__.__name__}
         return super().get(key, default)
 
     def matches(
@@ -71,16 +72,19 @@ def matches(
         schema: Optional[str] = None,
         identifier: Optional[str] = None,
     ) -> bool:
-        search = filter_null_values({
-            ComponentName.Database: database,
-            ComponentName.Schema: schema,
-            ComponentName.Identifier: identifier
-        })
+        search = filter_null_values(
+            {
+                ComponentName.Database: database,
+                ComponentName.Schema: schema,
+                ComponentName.Identifier: identifier,
+            }
+        )
 
         if not search:
             # nothing was passed in
             raise dbt.exceptions.RuntimeException(
-                "Tried to match relation, but no search path was passed!")
+                "Tried to match relation, but no search path was passed!"
+            )
 
         exact_match = True
         approximate_match = True
@@ -88,17 +92,13 @@ def matches(
         for k, v in search.items():
             if not self._is_exactish_match(k, v):
                 exact_match = False
-
-            if (
-                self.path.get_lowered_part(k).strip(self.quote_character) !=
-                v.lower().strip(self.quote_character)
+            if str(self.path.get_lowered_part(k)).strip(self.quote_character) != v.lower().strip(
+                self.quote_character
             ):
-                approximate_match = False
+                approximate_match = False  # type: ignore[union-attr]
 
         if approximate_match and not exact_match:
-            target = self.create(
-                database=database, schema=schema, identifier=identifier
-            )
+            target = self.create(database=database, schema=schema, identifier=identifier)
             dbt.exceptions.approximate_relation_match(target, self)
 
         return exact_match
@@ -112,11 +112,13 @@ def quote(
         schema: Optional[bool] = None,
         identifier: Optional[bool] = None,
     ) -> Self:
-        policy = filter_null_values({
-            ComponentName.Database: database,
-            ComponentName.Schema: schema,
-            ComponentName.Identifier: identifier
-        })
+        policy = filter_null_values(
+            {
+                ComponentName.Database: database,
+                ComponentName.Schema: schema,
+                ComponentName.Identifier: identifier,
+            }
+        )
 
         new_quote_policy = self.quote_policy.replace_dict(policy)
         return self.replace(quote_policy=new_quote_policy)
@@ -127,16 +129,18 @@ def include(
         schema: Optional[bool] = None,
         identifier: Optional[bool] = None,
     ) -> Self:
-        policy = filter_null_values({
-            ComponentName.Database: database,
-            ComponentName.Schema: schema,
-            ComponentName.Identifier: identifier
-        })
+        policy = filter_null_values(
+            {
+                ComponentName.Database: database,
+                ComponentName.Schema: schema,
+                ComponentName.Identifier: identifier,
+            }
+        )
 
         new_include_policy = self.include_policy.replace_dict(policy)
         return self.replace(include_policy=new_include_policy)
 
-    def information_schema(self, view_name=None) -> 'InformationSchema':
+    def information_schema(self, view_name=None) -> "InformationSchema":
         # some of our data comes from jinja, where things can be `Undefined`.
         if not isinstance(view_name, str):
             view_name = None
@@ -146,10 +150,10 @@ def information_schema(self, view_name=None) -> 'InformationSchema':
         info_schema = InformationSchema.from_relation(self, view_name)
         return info_schema.incorporate(path={"schema": None})
 
-    def information_schema_only(self) -> 'InformationSchema':
+    def information_schema_only(self) -> "InformationSchema":
         return self.information_schema()
 
-    def without_identifier(self) -> 'BaseRelation':
+    def without_identifier(self) -> "BaseRelation":
         """Return a form of this relation that only has the database and schema
         set to included. To get the appropriately-quoted form the schema out of
         the result (for use as part of a query), use `.render()`. To get the
@@ -159,9 +163,7 @@ def without_identifier(self) -> 'BaseRelation':
         """
         return self.include(identifier=False).replace_path(identifier=None)
 
-    def _render_iterator(
-        self
-    ) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
+    def _render_iterator(self) -> Iterator[Tuple[Optional[ComponentName], Optional[str]]]:
 
         for key in ComponentName:
             path_part: Optional[str] = None
@@ -173,27 +175,22 @@ def _render_iterator(
 
     def render(self) -> str:
         # if there is nothing set, this will return the empty string.
-        return '.'.join(
-            part for _, part in self._render_iterator()
-            if part is not None
-        )
+        return ".".join(part for _, part in self._render_iterator() if part is not None)
 
     def quoted(self, identifier):
-        return '{quote_char}{identifier}{quote_char}'.format(
+        return "{quote_char}{identifier}{quote_char}".format(
             quote_char=self.quote_character,
             identifier=identifier,
         )
 
     @classmethod
-    def create_from_source(
-        cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any
-    ) -> Self:
+    def create_from_source(cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any) -> Self:
         source_quoting = source.quoting.to_dict(omit_none=True)
-        source_quoting.pop('column', None)
+        source_quoting.pop("column", None)
         quote_policy = deep_merge(
             cls.get_default_quote_policy().to_dict(omit_none=True),
             source_quoting,
-            kwargs.get('quote_policy', {}),
+            kwargs.get("quote_policy", {}),
         )
 
         return cls.create(
@@ -201,12 +198,12 @@ def create_from_source(
             schema=source.schema,
             identifier=source.identifier,
             quote_policy=quote_policy,
-            **kwargs
+            **kwargs,
         )
 
     @staticmethod
     def add_ephemeral_prefix(name: str):
-        return f'__dbt__cte__{name}'
+        return f"__dbt__cte__{name}"
 
     @classmethod
     def create_ephemeral_from_node(
@@ -239,7 +236,8 @@ def create_from_node(
             schema=node.schema,
             identifier=node.alias,
             quote_policy=quote_policy,
-            **kwargs)
+            **kwargs,
+        )
 
     @classmethod
     def create_from(
@@ -251,15 +249,14 @@ def create_from(
         if node.resource_type == NodeType.Source:
             if not isinstance(node, ParsedSourceDefinition):
                 raise InternalException(
-                    'type mismatch, expected ParsedSourceDefinition but got {}'
-                    .format(type(node))
+                    "type mismatch, expected ParsedSourceDefinition but got {}".format(type(node))
                 )
             return cls.create_from_source(node, **kwargs)
         else:
             if not isinstance(node, (ParsedNode, CompiledNode)):
                 raise InternalException(
-                    'type mismatch, expected ParsedNode or CompiledNode but '
-                    'got {}'.format(type(node))
+                    "type mismatch, expected ParsedNode or CompiledNode but "
+                    "got {}".format(type(node))
                 )
             return cls.create_from_node(config, node, **kwargs)
 
@@ -272,14 +269,16 @@ def create(
         type: Optional[RelationType] = None,
         **kwargs,
     ) -> Self:
-        kwargs.update({
-            'path': {
-                'database': database,
-                'schema': schema,
-                'identifier': identifier,
-            },
-            'type': type,
-        })
+        kwargs.update(
+            {
+                "path": {
+                    "database": database,
+                    "schema": schema,
+                    "identifier": identifier,
+                },
+                "type": type,
+            }
+        )
         return cls.from_dict(kwargs)
 
     def __repr__(self) -> str:
@@ -345,7 +344,7 @@ def get_relation_type(cls) -> Type[RelationType]:
         return RelationType
 
 
-Info = TypeVar('Info', bound='InformationSchema')
+Info = TypeVar("Info", bound="InformationSchema")
 
 
 @dataclass(frozen=True, eq=False, repr=False)
@@ -355,17 +354,15 @@ class InformationSchema(BaseRelation):
     def __post_init__(self):
         if not isinstance(self.information_schema_view, (type(None), str)):
             raise dbt.exceptions.CompilationException(
-                'Got an invalid name: {}'.format(self.information_schema_view)
+                "Got an invalid name: {}".format(self.information_schema_view)
             )
 
     @classmethod
-    def get_path(
-        cls, relation: BaseRelation, information_schema_view: Optional[str]
-    ) -> Path:
+    def get_path(cls, relation: BaseRelation, information_schema_view: Optional[str]) -> Path:
         return Path(
             database=relation.database,
             schema=relation.schema,
-            identifier='INFORMATION_SCHEMA',
+            identifier="INFORMATION_SCHEMA",
         )
 
     @classmethod
@@ -396,9 +393,7 @@ def from_relation(
         relation: BaseRelation,
         information_schema_view: Optional[str],
     ) -> Info:
-        include_policy = cls.get_include_policy(
-            relation, information_schema_view
-        )
+        include_policy = cls.get_include_policy(relation, information_schema_view)
         quote_policy = cls.get_quote_policy(relation, information_schema_view)
         path = cls.get_path(relation, information_schema_view)
         return cls(
@@ -420,6 +415,7 @@ class SchemaSearchMap(Dict[InformationSchema, Set[Optional[str]]]):
     search for what schemas. The schema values are all lowercased to avoid
     duplication.
     """
+
     def add(self, relation: BaseRelation):
         key = relation.information_schema_only()
         if key not in self:
@@ -429,9 +425,7 @@ def add(self, relation: BaseRelation):
             schema = relation.schema.lower()
         self[key].add(schema)
 
-    def search(
-        self
-    ) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
+    def search(self) -> Iterator[Tuple[InformationSchema, Optional[str]]]:
         for information_schema_name, schemas in self.items():
             for schema in schemas:
                 yield information_schema_name, schema
@@ -446,14 +440,13 @@ def flatten(self, allow_multiple_databases: bool = False):
                 dbt.exceptions.raise_compiler_error(str(seen))
 
         for information_schema_name, schema in self.search():
-            path = {
-                'database': information_schema_name.database,
-                'schema': schema
-            }
-            new.add(information_schema_name.incorporate(
-                path=path,
-                quote_policy={'database': False},
-                include_policy={'database': False},
-            ))
+            path = {"database": information_schema_name.database, "schema": schema}
+            new.add(
+                information_schema_name.incorporate(
+                    path=path,
+                    quote_policy={"database": False},
+                    include_policy={"database": False},
+                )
+            )
 
         return new
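
A small runnable sketch of the `render`/`quoted` behavior reformatted above: `None` parts are skipped when joining, and quoting wraps a part in the quote character:

```python
from typing import Iterable, Optional


def render(parts: Iterable[Optional[str]]) -> str:
    # if there is nothing set, this will return the empty string.
    return ".".join(part for part in parts if part is not None)


def quoted(identifier: str, quote_character: str = '"') -> str:
    return "{quote_char}{identifier}{quote_char}".format(
        quote_char=quote_character,
        identifier=identifier,
    )


assert render(["analytics", None, "users"]) == "analytics.users"
assert quoted("users") == '"users"'
```
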
diff --git a/core/dbt/adapters/cache.py b/core/dbt/adapters/cache.py
index 1aa3805fb00..593bd16851b 100644
--- a/core/dbt/adapters/cache.py
+++ b/core/dbt/adapters/cache.py
@@ -18,7 +18,7 @@
     RenameSchema,
     TemporaryRelation,
     UncachedRelation,
-    UpdateReference
+    UpdateReference,
 )
 from dbt.utils import lowercase
 from dbt.helper_types import Lazy
@@ -29,7 +29,7 @@ def dot_separated(key: _ReferenceKey) -> str:
 
     :param _ReferenceKey key: The key to stringify.
     """
-    return '.'.join(map(str, key))
+    return ".".join(map(str, key))
 
 
 class _CachedRelation:
@@ -41,14 +41,15 @@ class _CachedRelation:
         that refer to this relation.
     :attr BaseRelation inner: The underlying dbt relation.
     """
+
     def __init__(self, inner):
         self.referenced_by = {}
         self.inner = inner
 
     def __str__(self) -> str:
-        return (
-            '_CachedRelation(database={}, schema={}, identifier={}, inner={})'
-        ).format(self.database, self.schema, self.identifier, self.inner)
+        return ("_CachedRelation(database={}, schema={}, identifier={}, inner={})").format(
+            self.database, self.schema, self.identifier, self.inner
+        )
 
     @property
     def database(self) -> Optional[str]:
@@ -82,7 +83,7 @@ def key(self):
         """
         return _make_key(self)
 
-    def add_reference(self, referrer: '_CachedRelation'):
+    def add_reference(self, referrer: "_CachedRelation"):
         """Add a reference from referrer to self, indicating that if this node
         were drop...cascaded, the referrer would be dropped as well.
 
@@ -126,9 +127,9 @@ def rename(self, new_relation):
         # table_name is ever anything but the identifier (via .create())
         self.inner = self.inner.incorporate(
             path={
-                'database': new_relation.inner.database,
-                'schema': new_relation.inner.schema,
-                'identifier': new_relation.inner.identifier
+                "database": new_relation.inner.database,
+                "schema": new_relation.inner.schema,
+                "identifier": new_relation.inner.identifier,
             },
         )
 
@@ -144,8 +145,9 @@ def rename_key(self, old_key, new_key):
         """
         if new_key in self.referenced_by:
             dbt.exceptions.raise_cache_inconsistent(
-                'in rename of "{}" -> "{}", new name is in the cache already'
-                .format(old_key, new_key)
+                'in rename of "{}" -> "{}", new name is in the cache already'.format(
+                    old_key, new_key
+                )
             )
 
         if old_key not in self.referenced_by:
@@ -170,13 +172,16 @@ class RelationsCache:
         The adapters also hold this lock while filling the cache.
     :attr Set[str] schemas: The set of known/cached schemas, all lowercased.
     """
+
     def __init__(self) -> None:
         self.relations: Dict[_ReferenceKey, _CachedRelation] = {}
         self.lock = threading.RLock()
         self.schemas: Set[Tuple[Optional[str], Optional[str]]] = set()
 
     def add_schema(
-        self, database: Optional[str], schema: Optional[str],
+        self,
+        database: Optional[str],
+        schema: Optional[str],
     ) -> None:
         """Add a schema to the set of known schemas (case-insensitive)
 
@@ -186,7 +191,9 @@ def add_schema(
         self.schemas.add((lowercase(database), lowercase(schema)))
 
     def drop_schema(
-        self, database: Optional[str], schema: Optional[str],
+        self,
+        database: Optional[str],
+        schema: Optional[str],
     ) -> None:
         """Drop the given schema and remove it from the set of known schemas.
 
@@ -230,10 +237,7 @@ def dump_graph(self):
         # self.relations or any cache entry's referenced_by during iteration
         # it's a runtime error!
         with self.lock:
-            return {
-                dot_separated(k): v.dump_graph_entry()
-                for k, v in self.relations.items()
-            }
+            return {dot_separated(k): v.dump_graph_entry() for k, v in self.relations.items()}
 
     def _setdefault(self, relation: _CachedRelation):
         """Add a relation to the cache, or return it if it already exists.
@@ -261,15 +265,13 @@ def _add_link(self, referenced_key, dependent_key):
             return
         if referenced is None:
             dbt.exceptions.raise_cache_inconsistent(
-                'in add_link, referenced link key {} not in cache!'
-                .format(referenced_key)
+                "in add_link, referenced link key {} not in cache!".format(referenced_key)
             )
 
         dependent = self.relations.get(dependent_key)
         if dependent is None:
             dbt.exceptions.raise_cache_inconsistent(
-                'in add_link, dependent link key {} not in cache!'
-                .format(dependent_key)
+                "in add_link, dependent link key {} not in cache!".format(dependent_key)
             )
 
         assert dependent is not None  # we just raised!
@@ -301,15 +303,11 @@ def add_link(self, referenced, dependent):
             return
         if ref_key not in self.relations:
             # Insert a dummy "external" relation.
-            referenced = referenced.replace(
-                type=referenced.External
-            )
+            referenced = referenced.replace(type=referenced.External)
             self.add(referenced)
         if dep_key not in self.relations:
             # Insert a dummy "external" relation.
-            dependent = dependent.replace(
-                type=referenced.External
-            )
+            dependent = dependent.replace(type=referenced.External)
             self.add(dependent)
         fire_event(AddLink(dep_key=dep_key, ref_key=ref_key))
         with self.lock:
@@ -416,8 +414,9 @@ def _check_rename_constraints(self, old_key, new_key):
         """
         if new_key in self.relations:
             dbt.exceptions.raise_cache_inconsistent(
-                'in rename, new key {} already in cache: {}'
-                .format(new_key, list(self.relations.keys()))
+                "in rename, new key {} already in cache: {}".format(
+                    new_key, list(self.relations.keys())
+                )
             )
 
         if old_key not in self.relations:
@@ -451,9 +450,7 @@ def rename(self, old, new):
 
         fire_event(DumpAfterRenameSchema(dump=Lazy.defer(lambda: self.dump_graph())))
 
-    def get_relations(
-        self, database: Optional[str], schema: Optional[str]
-    ) -> List[Any]:
+    def get_relations(self, database: Optional[str], schema: Optional[str]) -> List[Any]:
         """Case-insensitively yield all relations matching the given schema.
 
         :param str schema: The case-insensitive schema name to list from.
@@ -464,14 +461,14 @@ def get_relations(
         schema = lowercase(schema)
         with self.lock:
             results = [
-                r.inner for r in self.relations.values()
-                if (lowercase(r.schema) == schema and
-                    lowercase(r.database) == database)
+                r.inner
+                for r in self.relations.values()
+                if (lowercase(r.schema) == schema and lowercase(r.database) == database)
             ]
 
         if None in results:
             dbt.exceptions.raise_cache_inconsistent(
-                'in get_relations, a None relation was found in the cache!'
+                "in get_relations, a None relation was found in the cache!"
             )
         return results
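
The case-insensitive matching in `get_relations` above reduces to lowercasing both sides before comparing; a sketch with plain tuples standing in for cached relations:

```python
from typing import Optional


def lowercase(value: Optional[str]) -> Optional[str]:
    return value.lower() if value is not None else None


cached = [("DBT", "Analytics", "users"), ("dbt", "staging", "events")]
results = [
    relation
    for relation in cached
    if lowercase(relation[1]) == "analytics" and lowercase(relation[0]) == "dbt"
]
assert results == [("DBT", "Analytics", "users")]
```
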
 
diff --git a/core/dbt/adapters/factory.py b/core/dbt/adapters/factory.py
index 64021f10536..c89204e0bdb 100644
--- a/core/dbt/adapters/factory.py
+++ b/core/dbt/adapters/factory.py
@@ -49,9 +49,7 @@ def get_relation_class_by_name(self, name: str) -> Type[RelationProtocol]:
         adapter = self.get_adapter_class_by_name(name)
         return adapter.Relation
 
-    def get_config_class_by_name(
-        self, name: str
-    ) -> Type[AdapterConfig]:
+    def get_config_class_by_name(self, name: str) -> Type[AdapterConfig]:
         adapter = self.get_adapter_class_by_name(name)
         return adapter.AdapterSpecificConfigs
 
@@ -61,13 +59,13 @@ def load_plugin(self, name: str) -> Type[Credentials]:
         # singletons
         try:
             # mypy doesn't think modules have any attributes.
-            mod: Any = import_module('.' + name, 'dbt.adapters')
+            mod: Any = import_module("." + name, "dbt.adapters")
         except ModuleNotFoundError as exc:
             # if we failed to import the target module in particular, inform
             # the user about it via a runtime error
-            if exc.name == 'dbt.adapters.' + name:
+            if exc.name == "dbt.adapters." + name:
                 fire_event(AdapterImportError(exc=exc))
-                raise RuntimeException(f'Could not find adapter type {name}!')
+                raise RuntimeException(f"Could not find adapter type {name}!")
             # otherwise, the error had to have come from some underlying
             # library. Log the stack trace.
 
@@ -78,8 +76,8 @@ def load_plugin(self, name: str) -> Type[Credentials]:
 
         if plugin_type != name:
             raise RuntimeException(
-                f'Expected to find adapter with type named {name}, got '
-                f'adapter with type {plugin_type}'
+                f"Expected to find adapter with type named {name}, got "
+                f"adapter with type {plugin_type}"
             )
 
         with self.lock:
@@ -109,8 +107,7 @@ def lookup_adapter(self, adapter_name: str) -> Adapter:
         return self.adapters[adapter_name]
 
     def reset_adapters(self):
-        """Clear the adapters. This is useful for tests, which change configs.
-        """
+        """Clear the adapters. This is useful for tests, which change configs."""
         with self.lock:
             for adapter in self.adapters.values():
                 adapter.cleanup_connections()
@@ -140,9 +137,7 @@ def get_adapter_plugins(self, name: Optional[str]) -> List[AdapterPlugin]:
             try:
                 plugin = self.plugins[plugin_name]
             except KeyError:
-                raise InternalException(
-                    f'No plugin found for {plugin_name}'
-                ) from None
+                raise InternalException(f"No plugin found for {plugin_name}") from None
             plugins.append(plugin)
             seen.add(plugin_name)
             if plugin.dependencies is None:
@@ -153,9 +148,7 @@ def get_adapter_plugins(self, name: Optional[str]) -> List[AdapterPlugin]:
         return plugins
 
     def get_adapter_package_names(self, name: Optional[str]) -> List[str]:
-        package_names: List[str] = [
-            p.project_name for p in self.get_adapter_plugins(name)
-        ]
+        package_names: List[str] = [p.project_name for p in self.get_adapter_plugins(name)]
         package_names.append(GLOBAL_PROJECT_NAME)
         return package_names
 
@@ -165,9 +158,7 @@ def get_include_paths(self, name: Optional[str]) -> List[Path]:
             try:
                 path = self.packages[package_name]
             except KeyError:
-                raise InternalException(
-                    f'No internal package listing found for {package_name}'
-                )
+                raise InternalException(f"No internal package listing found for {package_name}")
             paths.append(path)
         return paths
 
@@ -187,8 +178,7 @@ def get_adapter(config: AdapterRequiredConfig):
 
 
 def reset_adapters():
-    """Clear the adapters. This is useful for tests, which change configs.
-    """
+    """Clear the adapters. This is useful for tests, which change configs."""
     FACTORY.reset_adapters()
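
A sketch of the dynamic-import pattern in `load_plugin` above; the error handling mirrors the diff, with `RuntimeException` replaced by a builtin so the snippet stands alone:

```python
from importlib import import_module
from typing import Any


def load_adapter_module(name: str) -> Any:
    try:
        # adapters live in submodules of dbt.adapters
        return import_module("." + name, "dbt.adapters")
    except ModuleNotFoundError as exc:
        # only swallow a missing target module, not the plugin's own
        # missing dependencies
        if exc.name == "dbt.adapters." + name:
            raise RuntimeError(f"Could not find adapter type {name}!") from exc
        raise
```
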
 
 
diff --git a/core/dbt/adapters/protocol.py b/core/dbt/adapters/protocol.py
index e0731485ebc..3d2174867b1 100644
--- a/core/dbt/adapters/protocol.py
+++ b/core/dbt/adapters/protocol.py
@@ -1,18 +1,24 @@
 from dataclasses import dataclass
 from typing import (
-    Type, Hashable, Optional, ContextManager, List, Generic, TypeVar, ClassVar,
-    Tuple, Union, Dict, Any
+    Type,
+    Hashable,
+    Optional,
+    ContextManager,
+    List,
+    Generic,
+    TypeVar,
+    ClassVar,
+    Tuple,
+    Union,
+    Dict,
+    Any,
 )
 from typing_extensions import Protocol
 
 import agate
 
-from dbt.contracts.connection import (
-    Connection, AdapterRequiredConfig, AdapterResponse
-)
-from dbt.contracts.graph.compiled import (
-    CompiledNode, ManifestNode, NonSourceCompiledNode
-)
+from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse
+from dbt.contracts.graph.compiled import CompiledNode, ManifestNode, NonSourceCompiledNode
 from dbt.contracts.graph.parsed import ParsedNode, ParsedSourceDefinition
 from dbt.contracts.graph.model_config import BaseConfig
 from dbt.contracts.graph.manifest import Manifest
@@ -34,7 +40,7 @@ class ColumnProtocol(Protocol):
     pass
 
 
-Self = TypeVar('Self', bound='RelationProtocol')
+Self = TypeVar("Self", bound="RelationProtocol")
 
 
 class RelationProtocol(Protocol):
@@ -64,22 +70,15 @@ def compile_node(
         ...
 
 
-AdapterConfig_T = TypeVar(
-    'AdapterConfig_T', bound=AdapterConfig
-)
-ConnectionManager_T = TypeVar(
-    'ConnectionManager_T', bound=ConnectionManagerProtocol
-)
-Relation_T = TypeVar(
-    'Relation_T', bound=RelationProtocol
-)
-Column_T = TypeVar(
-    'Column_T', bound=ColumnProtocol
-)
-Compiler_T = TypeVar('Compiler_T', bound=CompilerProtocol)
+AdapterConfig_T = TypeVar("AdapterConfig_T", bound=AdapterConfig)
+ConnectionManager_T = TypeVar("ConnectionManager_T", bound=ConnectionManagerProtocol)
+Relation_T = TypeVar("Relation_T", bound=RelationProtocol)
+Column_T = TypeVar("Column_T", bound=ColumnProtocol)
+Compiler_T = TypeVar("Compiler_T", bound=CompilerProtocol)
 
 
-class AdapterProtocol(
+# TODO CT-211
+class AdapterProtocol(  # type: ignore[misc]
     Protocol,
     Generic[
         AdapterConfig_T,
@@ -87,7 +86,7 @@ class AdapterProtocol(
         Relation_T,
         Column_T,
         Compiler_T,
-    ]
+    ],
 ):
     AdapterSpecificConfigs: ClassVar[Type[AdapterConfig_T]]
     Column: ClassVar[Type[Column_T]]
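
The TypeVar consolidation above is the standard bound-TypeVar-plus-Protocol pattern; a minimal illustrative sketch (these names are stand-ins, not the real dbt protocols):

```python
from typing import Generic, TypeVar

from typing_extensions import Protocol


class RelationLike(Protocol):
    def render(self) -> str:
        ...


Relation_T = TypeVar("Relation_T", bound=RelationLike)


class Holder(Generic[Relation_T]):
    def __init__(self, relation: Relation_T) -> None:
        self.relation = relation
```
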
diff --git a/core/dbt/adapters/reference_keys.py b/core/dbt/adapters/reference_keys.py
index 5780e0d0beb..734b6845f5f 100644
--- a/core/dbt/adapters/reference_keys.py
+++ b/core/dbt/adapters/reference_keys.py
@@ -4,7 +4,7 @@
 from typing import Optional
 
 
-_ReferenceKey = namedtuple('_ReferenceKey', 'database schema identifier')
+_ReferenceKey = namedtuple("_ReferenceKey", "database schema identifier")
 
 
 def lowercase(value: Optional[str]) -> Optional[str]:
@@ -19,6 +19,6 @@ def _make_key(relation) -> _ReferenceKey:
     to keep track of quoting
     """
     # databases and schemas can both be None
-    return _ReferenceKey(lowercase(relation.database),
-                         lowercase(relation.schema),
-                         lowercase(relation.identifier))
+    return _ReferenceKey(
+        lowercase(relation.database), lowercase(relation.schema), lowercase(relation.identifier)
+    )
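
As a quick illustration of what `_make_key` produces, here is the same logic run against a stand-in relation object (a real dbt relation would be used in practice):

```python
from collections import namedtuple
from types import SimpleNamespace

_ReferenceKey = namedtuple("_ReferenceKey", "database schema identifier")


def lowercase(value):
    return None if value is None else value.lower()


# databases and schemas can both be None
relation = SimpleNamespace(database="Analytics", schema=None, identifier="My_Table")
key = _ReferenceKey(
    lowercase(relation.database), lowercase(relation.schema), lowercase(relation.identifier)
)
print(key)  # _ReferenceKey(database='analytics', schema=None, identifier='my_table')
```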
diff --git a/core/dbt/adapters/sql/connections.py b/core/dbt/adapters/sql/connections.py
index b3984e04253..8d8e083bb0c 100644
--- a/core/dbt/adapters/sql/connections.py
+++ b/core/dbt/adapters/sql/connections.py
@@ -7,9 +7,7 @@
 import dbt.clients.agate_helper
 import dbt.exceptions
 from dbt.adapters.base import BaseConnectionManager
-from dbt.contracts.connection import (
-    Connection, ConnectionState, AdapterResponse
-)
+from dbt.contracts.connection import Connection, ConnectionState, AdapterResponse
 from dbt.events.functions import fire_event
 from dbt.events.types import ConnectionUsed, SQLQuery, SQLCommit, SQLQueryStatus
 
@@ -23,11 +21,12 @@ class SQLConnectionManager(BaseConnectionManager):
         - get_response
         - open
     """
+
     @abc.abstractmethod
     def cancel(self, connection: Connection):
         """Cancel the given connection."""
         raise dbt.exceptions.NotImplementedException(
-            '`cancel` is not implemented for this adapter!'
+            "`cancel` is not implemented for this adapter!"
         )
 
     def cancel_open(self) -> List[str]:
@@ -40,10 +39,7 @@ def cancel_open(self) -> List[str]:
 
                 # if the connection failed, the handle will be None so we have
                 # nothing to cancel.
-                if (
-                    connection.handle is not None and
-                    connection.state == ConnectionState.OPEN
-                ):
+                if connection.handle is not None and connection.state == ConnectionState.OPEN:
                     self.cancel(connection)
                 if connection.name is not None:
                     names.append(connection.name)
@@ -54,7 +50,7 @@ def add_query(
         sql: str,
         auto_begin: bool = True,
         bindings: Optional[Any] = None,
-        abridge_sql_log: bool = False
+        abridge_sql_log: bool = False,
     ) -> Tuple[Connection, Any]:
         connection = self.get_thread_connection()
         if auto_begin and connection.transaction_open is False:
@@ -63,7 +59,7 @@ def add_query(
 
         with self.exception_handler(sql):
             if abridge_sql_log:
-                log_sql = '{}...'.format(sql[:512])
+                log_sql = "{}...".format(sql[:512])
             else:
                 log_sql = sql
 
@@ -75,8 +71,7 @@ def add_query(
 
             fire_event(
                 SQLQueryStatus(
-                    status=str(self.get_response(cursor)),
-                    elapsed=round((time.time() - pre), 2)
+                    status=str(self.get_response(cursor)), elapsed=round((time.time() - pre), 2)
                 )
             )
 
@@ -86,23 +81,26 @@ def add_query(
     def get_response(cls, cursor: Any) -> Union[AdapterResponse, str]:
         """Get the status of the cursor."""
         raise dbt.exceptions.NotImplementedException(
-            '`get_response` is not implemented for this adapter!'
+            "`get_response` is not implemented for this adapter!"
         )
 
     @classmethod
     def process_results(
-        cls,
-        column_names: Iterable[str],
-        rows: Iterable[Any]
+        cls, column_names: Iterable[str], rows: Iterable[Any]
     ) -> List[Dict[str, Any]]:
-        unique_col_names = dict()
-        for idx in range(len(column_names)):
-            col_name = column_names[idx]
+        # TODO CT-211
+        unique_col_names = dict()  # type: ignore[var-annotated]
+        # TODO CT-211
+        for idx in range(len(column_names)):  # type: ignore[arg-type]
+            # TODO CT-211
+            col_name = column_names[idx]  # type: ignore[index]
             if col_name in unique_col_names:
                 unique_col_names[col_name] += 1
-                column_names[idx] = f'{col_name}_{unique_col_names[col_name]}'
+                # TODO CT-211
+                column_names[idx] = f"{col_name}_{unique_col_names[col_name]}"  # type: ignore[index] # noqa
             else:
-                unique_col_names[column_names[idx]] = 1
+                # TODO CT-211
+                unique_col_names[column_names[idx]] = 1  # type: ignore[index]
         return [dict(zip(column_names, row)) for row in rows]
 
     @classmethod
@@ -115,10 +113,7 @@ def get_result_from_cursor(cls, cursor: Any) -> agate.Table:
             rows = cursor.fetchall()
             data = cls.process_results(column_names, rows)
 
-        return dbt.clients.agate_helper.table_from_data_flat(
-            data,
-            column_names
-        )
+        return dbt.clients.agate_helper.table_from_data_flat(data, column_names)
 
     def execute(
         self, sql: str, auto_begin: bool = False, fetch: bool = False
@@ -133,17 +128,18 @@ def execute(
         return response, table
 
     def add_begin_query(self):
-        return self.add_query('BEGIN', auto_begin=False)
+        return self.add_query("BEGIN", auto_begin=False)
 
     def add_commit_query(self):
-        return self.add_query('COMMIT', auto_begin=False)
+        return self.add_query("COMMIT", auto_begin=False)
 
     def begin(self):
         connection = self.get_thread_connection()
         if connection.transaction_open is True:
             raise dbt.exceptions.InternalException(
                 'Tried to begin a new transaction on connection "{}", but '
-                'it already had one open!'.format(connection.name))
+                "it already had one open!".format(connection.name)
+            )
 
         self.add_begin_query()
 
@@ -155,7 +151,8 @@ def commit(self):
         if connection.transaction_open is False:
             raise dbt.exceptions.InternalException(
                 'Tried to commit transaction on connection "{}", but '
-                'it does not have one open!'.format(connection.name))
+                "it does not have one open!".format(connection.name)
+            )
 
         fire_event(SQLCommit(conn_name=connection.name))
         self.add_commit_query()
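
The `process_results` hunk above is easier to follow with the dedup logic extracted into a standalone function: repeated column names get a numeric suffix so `dict(zip(...))` does not silently drop values.

```python
def process_results(column_names, rows):
    # identical logic to SQLConnectionManager.process_results above,
    # minus the class plumbing and the type: ignore comments
    unique_col_names = {}
    for idx in range(len(column_names)):
        col_name = column_names[idx]
        if col_name in unique_col_names:
            unique_col_names[col_name] += 1
            column_names[idx] = f"{col_name}_{unique_col_names[col_name]}"
        else:
            unique_col_names[column_names[idx]] = 1
    return [dict(zip(column_names, row)) for row in rows]


print(process_results(["id", "name", "id"], [(1, "a", 2)]))
# [{'id': 1, 'name': 'a', 'id_2': 2}]
```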
diff --git a/core/dbt/adapters/sql/impl.py b/core/dbt/adapters/sql/impl.py
index 36de954dd09..1f4464c4f4f 100644
--- a/core/dbt/adapters/sql/impl.py
+++ b/core/dbt/adapters/sql/impl.py
@@ -13,16 +13,16 @@
 
 from dbt.adapters.base.relation import BaseRelation
 
-LIST_RELATIONS_MACRO_NAME = 'list_relations_without_caching'
-GET_COLUMNS_IN_RELATION_MACRO_NAME = 'get_columns_in_relation'
-LIST_SCHEMAS_MACRO_NAME = 'list_schemas'
-CHECK_SCHEMA_EXISTS_MACRO_NAME = 'check_schema_exists'
-CREATE_SCHEMA_MACRO_NAME = 'create_schema'
-DROP_SCHEMA_MACRO_NAME = 'drop_schema'
-RENAME_RELATION_MACRO_NAME = 'rename_relation'
-TRUNCATE_RELATION_MACRO_NAME = 'truncate_relation'
-DROP_RELATION_MACRO_NAME = 'drop_relation'
-ALTER_COLUMN_TYPE_MACRO_NAME = 'alter_column_type'
+LIST_RELATIONS_MACRO_NAME = "list_relations_without_caching"
+GET_COLUMNS_IN_RELATION_MACRO_NAME = "get_columns_in_relation"
+LIST_SCHEMAS_MACRO_NAME = "list_schemas"
+CHECK_SCHEMA_EXISTS_MACRO_NAME = "check_schema_exists"
+CREATE_SCHEMA_MACRO_NAME = "create_schema"
+DROP_SCHEMA_MACRO_NAME = "drop_schema"
+RENAME_RELATION_MACRO_NAME = "rename_relation"
+TRUNCATE_RELATION_MACRO_NAME = "truncate_relation"
+DROP_RELATION_MACRO_NAME = "drop_relation"
+ALTER_COLUMN_TYPE_MACRO_NAME = "alter_column_type"
 
 
 class SQLAdapter(BaseAdapter):
@@ -63,30 +63,24 @@ def add_query(
         :param abridge_sql_log: If set, limit the raw sql logged to 512
             characters
         """
-        return self.connections.add_query(sql, auto_begin, bindings,
-                                          abridge_sql_log)
+        return self.connections.add_query(sql, auto_begin, bindings, abridge_sql_log)
 
     @classmethod
     def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
         return "text"
 
     @classmethod
-    def convert_number_type(
-        cls, agate_table: agate.Table, col_idx: int
-    ) -> str:
-        decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))
+    def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
+        # TODO CT-211
+        decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))  # type: ignore[attr-defined]
         return "float8" if decimals else "integer"
 
     @classmethod
-    def convert_boolean_type(
-            cls, agate_table: agate.Table, col_idx: int
-    ) -> str:
+    def convert_boolean_type(cls, agate_table: agate.Table, col_idx: int) -> str:
         return "boolean"
 
     @classmethod
-    def convert_datetime_type(
-            cls, agate_table: agate.Table, col_idx: int
-    ) -> str:
+    def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str:
         return "timestamp without time zone"
 
     @classmethod
@@ -102,21 +96,14 @@ def is_cancelable(cls) -> bool:
         return True
 
     def expand_column_types(self, goal, current):
-        reference_columns = {
-            c.name: c for c in
-            self.get_columns_in_relation(goal)
-        }
+        reference_columns = {c.name: c for c in self.get_columns_in_relation(goal)}
 
-        target_columns = {
-            c.name: c for c
-            in self.get_columns_in_relation(current)
-        }
+        target_columns = {c.name: c for c in self.get_columns_in_relation(current)}
 
         for column_name, reference_column in reference_columns.items():
             target_column = target_columns.get(column_name)
 
-            if target_column is not None and \
-               target_column.can_expand_to(reference_column):
+            if target_column is not None and target_column.can_expand_to(reference_column):
                 col_string_size = reference_column.string_size()
                 new_type = self.Column.string_type(col_string_size)
                 fire_event(
@@ -129,9 +116,7 @@ def expand_column_types(self, goal, current):
 
                 self.alter_column_type(current, column_name, new_type)
 
-    def alter_column_type(
-            self, relation, column_name, new_column_type
-    ) -> None:
+    def alter_column_type(self, relation, column_name, new_column_type) -> None:
         """
         1. Create a new column (w/ temp name and correct type)
         2. Copy data over to it
@@ -139,53 +124,40 @@ def alter_column_type(
         4. Rename the new column to existing column
         """
         kwargs = {
-            'relation': relation,
-            'column_name': column_name,
-            'new_column_type': new_column_type,
+            "relation": relation,
+            "column_name": column_name,
+            "new_column_type": new_column_type,
         }
-        self.execute_macro(
-            ALTER_COLUMN_TYPE_MACRO_NAME,
-            kwargs=kwargs
-        )
+        self.execute_macro(ALTER_COLUMN_TYPE_MACRO_NAME, kwargs=kwargs)
 
     def drop_relation(self, relation):
         if relation.type is None:
             dbt.exceptions.raise_compiler_error(
-                'Tried to drop relation {}, but its type is null.'
-                .format(relation))
+                "Tried to drop relation {}, but its type is null.".format(relation)
+            )
 
         self.cache_dropped(relation)
-        self.execute_macro(
-            DROP_RELATION_MACRO_NAME,
-            kwargs={'relation': relation}
-        )
+        self.execute_macro(DROP_RELATION_MACRO_NAME, kwargs={"relation": relation})
 
     def truncate_relation(self, relation):
-        self.execute_macro(
-            TRUNCATE_RELATION_MACRO_NAME,
-            kwargs={'relation': relation}
-        )
+        self.execute_macro(TRUNCATE_RELATION_MACRO_NAME, kwargs={"relation": relation})
 
     def rename_relation(self, from_relation, to_relation):
         self.cache_renamed(from_relation, to_relation)
 
-        kwargs = {'from_relation': from_relation, 'to_relation': to_relation}
-        self.execute_macro(
-            RENAME_RELATION_MACRO_NAME,
-            kwargs=kwargs
-        )
+        kwargs = {"from_relation": from_relation, "to_relation": to_relation}
+        self.execute_macro(RENAME_RELATION_MACRO_NAME, kwargs=kwargs)
 
     def get_columns_in_relation(self, relation):
         return self.execute_macro(
-            GET_COLUMNS_IN_RELATION_MACRO_NAME,
-            kwargs={'relation': relation}
+            GET_COLUMNS_IN_RELATION_MACRO_NAME, kwargs={"relation": relation}
         )
 
     def create_schema(self, relation: BaseRelation) -> None:
         relation = relation.without_identifier()
         fire_event(SchemaCreation(relation=_make_key(relation)))
         kwargs = {
-            'relation': relation,
+            "relation": relation,
         }
         self.execute_macro(CREATE_SCHEMA_MACRO_NAME, kwargs=kwargs)
         self.commit_if_has_connection()
@@ -196,49 +168,42 @@ def drop_schema(self, relation: BaseRelation) -> None:
         relation = relation.without_identifier()
         fire_event(SchemaDrop(relation=_make_key(relation)))
         kwargs = {
-            'relation': relation,
+            "relation": relation,
         }
         self.execute_macro(DROP_SCHEMA_MACRO_NAME, kwargs=kwargs)
         # we can update the cache here
         self.cache.drop_schema(relation.database, relation.schema)
 
     def list_relations_without_caching(
-        self, schema_relation: BaseRelation,
+        self,
+        schema_relation: BaseRelation,
     ) -> List[BaseRelation]:
-        kwargs = {'schema_relation': schema_relation}
-        results = self.execute_macro(
-            LIST_RELATIONS_MACRO_NAME,
-            kwargs=kwargs
-        )
+        kwargs = {"schema_relation": schema_relation}
+        results = self.execute_macro(LIST_RELATIONS_MACRO_NAME, kwargs=kwargs)
 
         relations = []
-        quote_policy = {
-            'database': True,
-            'schema': True,
-            'identifier': True
-        }
+        quote_policy = {"database": True, "schema": True, "identifier": True}
         for _database, name, _schema, _type in results:
             try:
                 _type = self.Relation.get_relation_type(_type)
             except ValueError:
                 _type = self.Relation.External
-            relations.append(self.Relation.create(
-                database=_database,
-                schema=_schema,
-                identifier=name,
-                quote_policy=quote_policy,
-                type=_type
-            ))
+            relations.append(
+                self.Relation.create(
+                    database=_database,
+                    schema=_schema,
+                    identifier=name,
+                    quote_policy=quote_policy,
+                    type=_type,
+                )
+            )
         return relations
 
     def quote(self, identifier):
         return '"{}"'.format(identifier)
 
     def list_schemas(self, database: str) -> List[str]:
-        results = self.execute_macro(
-            LIST_SCHEMAS_MACRO_NAME,
-            kwargs={'database': database}
-        )
+        results = self.execute_macro(LIST_SCHEMAS_MACRO_NAME, kwargs={"database": database})
 
         return [row[0] for row in results]
 
@@ -246,13 +211,10 @@ def check_schema_exists(self, database: str, schema: str) -> bool:
         information_schema = self.Relation.create(
             database=database,
             schema=schema,
-            identifier='INFORMATION_SCHEMA',
-            quote_policy=self.config.quoting
+            identifier="INFORMATION_SCHEMA",
+            quote_policy=self.config.quoting,
         ).information_schema()
 
-        kwargs = {'information_schema': information_schema, 'schema': schema}
-        results = self.execute_macro(
-            CHECK_SCHEMA_EXISTS_MACRO_NAME,
-            kwargs=kwargs
-        )
+        kwargs = {"information_schema": information_schema, "schema": schema}
+        results = self.execute_macro(CHECK_SCHEMA_EXISTS_MACRO_NAME, kwargs=kwargs)
         return results[0][0] > 0
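
To make the `expand_column_types` hunk above concrete, here is a toy re-creation with a stand-in `Column` class; dbt's real column types implement `can_expand_to` and `string_size` themselves, so everything here is illustrative only:

```python
from dataclasses import dataclass


@dataclass
class Column:
    name: str
    size: int

    def can_expand_to(self, other: "Column") -> bool:
        return other.size > self.size

    def string_size(self) -> int:
        return self.size


def plan_expansions(goal, current):
    # same shape as SQLAdapter.expand_column_types: index both column sets
    # by name, then widen any target column its goal counterpart exceeds
    reference_columns = {c.name: c for c in goal}
    target_columns = {c.name: c for c in current}
    for column_name, reference_column in reference_columns.items():
        target_column = target_columns.get(column_name)
        if target_column is not None and target_column.can_expand_to(reference_column):
            yield column_name, reference_column.string_size()


print(list(plan_expansions(goal=[Column("email", 256)], current=[Column("email", 64)])))
# [('email', 256)]
```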
diff --git a/core/dbt/clients/_jinja_blocks.py b/core/dbt/clients/_jinja_blocks.py
index 8a5a1dae948..761c6dfcb4d 100644
--- a/core/dbt/clients/_jinja_blocks.py
+++ b/core/dbt/clients/_jinja_blocks.py
@@ -10,79 +10,83 @@ def regex(pat):
 
 class BlockData:
     """raw plaintext data from the top level of the file."""
+
     def __init__(self, contents):
-        self.block_type_name = '__dbt__data'
+        self.block_type_name = "__dbt__data"
         self.contents = contents
         self.full_block = contents
 
 
 class BlockTag:
-    def __init__(self, block_type_name, block_name, contents=None,
-                 full_block=None, **kw):
+    def __init__(self, block_type_name, block_name, contents=None, full_block=None, **kw):
         self.block_type_name = block_type_name
         self.block_name = block_name
         self.contents = contents
         self.full_block = full_block
 
     def __str__(self):
-        return 'BlockTag({!r}, {!r})'.format(self.block_type_name,
-                                             self.block_name)
+        return "BlockTag({!r}, {!r})".format(self.block_type_name, self.block_name)
 
     def __repr__(self):
         return str(self)
 
     @property
     def end_block_type_name(self):
-        return 'end{}'.format(self.block_type_name)
+        return "end{}".format(self.block_type_name)
 
     def end_pat(self):
         # we don't want to use string formatting here because jinja uses most
         # of the string formatting operators in its syntax...
-        pattern = ''.join((
-            r'(?P<endblock>((?:\s*\{\%\-|\{\%)\s*',
-            self.end_block_type_name,
-            r'\s*(?:\-\%\}\s*|\%\})))',
-        ))
+        pattern = "".join(
+            (
+                r"(?P((?:\s*\{\%\-|\{\%)\s*",
+                self.end_block_type_name,
+                r"\s*(?:\-\%\}\s*|\%\})))",
+            )
+        )
         return regex(pattern)
 
 
-Tag = namedtuple('Tag', 'block_type_name block_name start end')
+Tag = namedtuple("Tag", "block_type_name block_name start end")
 
 
-_NAME_PATTERN = r'[A-Za-z_][A-Za-z_0-9]*'
+_NAME_PATTERN = r"[A-Za-z_][A-Za-z_0-9]*"
 
-COMMENT_START_PATTERN = regex(r'(?:(?P<comment_start>(\s*\{\#)))')
-COMMENT_END_PATTERN = regex(r'(.*?)(\s*\#\})')
-RAW_START_PATTERN = regex(
-    r'(?:\s*\{\%\-|\{\%)\s*(?P<raw_start>(raw))\s*(?:\-\%\}\s*|\%\})'
-)
-EXPR_START_PATTERN = regex(r'(?P<expr_start>(\{\{\s*))')
-EXPR_END_PATTERN = regex(r'(?P<expr_end>(\s*\}\}))')
+COMMENT_START_PATTERN = regex(r"(?:(?P<comment_start>(\s*\{\#)))")
+COMMENT_END_PATTERN = regex(r"(.*?)(\s*\#\})")
+RAW_START_PATTERN = regex(r"(?:\s*\{\%\-|\{\%)\s*(?P<raw_start>(raw))\s*(?:\-\%\}\s*|\%\})")
+EXPR_START_PATTERN = regex(r"(?P<expr_start>(\{\{\s*))")
+EXPR_END_PATTERN = regex(r"(?P<expr_end>(\s*\}\}))")
 
-BLOCK_START_PATTERN = regex(''.join((
-    r'(?:\s*\{\%\-|\{\%)\s*',
-    r'(?P<block_type_name>({}))'.format(_NAME_PATTERN),
-    # some blocks have a 'block name'.
-    r'(?:\s+(?P<block_name>({})))?'.format(_NAME_PATTERN),
-)))
+BLOCK_START_PATTERN = regex(
+    "".join(
+        (
+            r"(?:\s*\{\%\-|\{\%)\s*",
+            r"(?P({}))".format(_NAME_PATTERN),
+            # some blocks have a 'block name'.
+            r"(?:\s+(?P({})))?".format(_NAME_PATTERN),
+        )
+    )
+)
 
 
-RAW_BLOCK_PATTERN = regex(''.join((
-    r'(?:\s*\{\%\-|\{\%)\s*raw\s*(?:\-\%\}\s*|\%\})',
-    r'(?:.*?)',
-    r'(?:\s*\{\%\-|\{\%)\s*endraw\s*(?:\-\%\}\s*|\%\})',
-)))
+RAW_BLOCK_PATTERN = regex(
+    "".join(
+        (
+            r"(?:\s*\{\%\-|\{\%)\s*raw\s*(?:\-\%\}\s*|\%\})",
+            r"(?:.*?)",
+            r"(?:\s*\{\%\-|\{\%)\s*endraw\s*(?:\-\%\}\s*|\%\})",
+        )
+    )
+)
 
-TAG_CLOSE_PATTERN = regex(r'(?:(?P<tag_close>(\-\%\}\s*|\%\})))')
+TAG_CLOSE_PATTERN = regex(r"(?:(?P<tag_close>(\-\%\}\s*|\%\})))")
 
 # stolen from jinja's lexer. Note that we've consumed all prefix whitespace by
 # the time we want to use this.
-STRING_PATTERN = regex(
-    r"(?P<string>('([^'\\]*(?:\\.[^'\\]*)*)'|"
-    r'"([^"\\]*(?:\\.[^"\\]*)*)"))'
-)
+STRING_PATTERN = regex(r"(?P<string>('([^'\\]*(?:\\.[^'\\]*)*)'|" r'"([^"\\]*(?:\\.[^"\\]*)*)"))')

-QUOTE_START_PATTERN = regex(r'''(?P<quote>(['"]))''')
+QUOTE_START_PATTERN = regex(r"""(?P<quote>(['"]))""")
 
 
 class TagIterator:
@@ -99,10 +103,10 @@ def linepos(self, end=None) -> str:
         end_val: int = self.pos if end is None else end
         data = self.data[:end_val]
         # if not found, rfind returns -1, and -1+1=0, which is perfect!
-        last_line_start = data.rfind('\n') + 1
+        last_line_start = data.rfind("\n") + 1
         # it's easy to forget this, but line numbers are 1-indexed
-        line_number = data.count('\n') + 1
-        return f'{line_number}:{end_val - last_line_start}'
+        line_number = data.count("\n") + 1
+        return f"{line_number}:{end_val - last_line_start}"
 
     def advance(self, new_position):
         self.pos = new_position
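
The `linepos` hunk above packs a neat trick into two lines; a standalone version makes the `rfind`-based column arithmetic easy to test:

```python
def linepos(data: str, end_val: int) -> str:
    sliced = data[:end_val]
    # if not found, rfind returns -1, and -1 + 1 = 0, which is perfect!
    last_line_start = sliced.rfind("\n") + 1
    # line numbers are 1-indexed
    line_number = sliced.count("\n") + 1
    return f"{line_number}:{end_val - last_line_start}"


print(linepos("select 1\nfrom t", 11))  # 2:2
```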
@@ -120,7 +124,7 @@ def _first_match(self, *patterns, **kwargs):
         matches = []
         for pattern in patterns:
             # default to 'search', but sometimes we want to 'match'.
-            if kwargs.get('method', 'search') == 'search':
+            if kwargs.get("method", "search") == "search":
                 match = self._search(pattern)
             else:
                 match = self._match(pattern)
@@ -136,7 +140,7 @@ def _expect_match(self, expected_name, *patterns, **kwargs):
         match = self._first_match(*patterns, **kwargs)
         if match is None:
             msg = 'unexpected EOF, expected {}, got "{}"'.format(
-                expected_name, self.data[self.pos:]
+                expected_name, self.data[self.pos :]
             )
             dbt.exceptions.raise_compiler_error(msg)
         return match
@@ -156,22 +160,20 @@ def handle_expr(self, match):
         """
         self.advance(match.end())
         while True:
-            match = self._expect_match('}}',
-                                       EXPR_END_PATTERN,
-                                       QUOTE_START_PATTERN)
-            if match.groupdict().get('expr_end') is not None:
+            match = self._expect_match("}}", EXPR_END_PATTERN, QUOTE_START_PATTERN)
+            if match.groupdict().get("expr_end") is not None:
                 break
             else:
                 # it's a quote. we haven't advanced for this match yet, so
                 # just slurp up the whole string, no need to rewind.
-                match = self._expect_match('string', STRING_PATTERN)
+                match = self._expect_match("string", STRING_PATTERN)
                 self.advance(match.end())
 
         self.advance(match.end())
 
     def handle_comment(self, match):
         self.advance(match.end())
-        match = self._expect_match('#}', COMMENT_END_PATTERN)
+        match = self._expect_match("#}", COMMENT_END_PATTERN)
         self.advance(match.end())
 
     def _expect_block_close(self):
@@ -188,22 +190,19 @@ def _expect_block_close(self):
         """
         while True:
             end_match = self._expect_match(
-                'tag close ("%}")',
-                QUOTE_START_PATTERN,
-                TAG_CLOSE_PATTERN
+                'tag close ("%}")', QUOTE_START_PATTERN, TAG_CLOSE_PATTERN
             )
             self.advance(end_match.end())
-            if end_match.groupdict().get('tag_close') is not None:
+            if end_match.groupdict().get("tag_close") is not None:
                 return
             # must be a string. Rewind to its start and advance past it.
             self.rewind()
-            string_match = self._expect_match('string', STRING_PATTERN)
+            string_match = self._expect_match("string", STRING_PATTERN)
             self.advance(string_match.end())
 
     def handle_raw(self):
         # raw blocks are super special, they are a single complete regex
-        match = self._expect_match('{% raw %}...{% endraw %}',
-                                   RAW_BLOCK_PATTERN)
+        match = self._expect_match("{% raw %}...{% endraw %}", RAW_BLOCK_PATTERN)
         self.advance(match.end())
         return match.end()
 
@@ -220,30 +219,24 @@ def handle_tag(self, match):
         """
         groups = match.groupdict()
         # always a value
-        block_type_name = groups['block_type_name']
+        block_type_name = groups["block_type_name"]
         # might be None
-        block_name = groups.get('block_name')
+        block_name = groups.get("block_name")
         start_pos = self.pos
-        if block_type_name == 'raw':
-            match = self._expect_match('{% raw %}...{% endraw %}',
-                                       RAW_BLOCK_PATTERN)
+        if block_type_name == "raw":
+            match = self._expect_match("{% raw %}...{% endraw %}", RAW_BLOCK_PATTERN)
             self.advance(match.end())
         else:
             self.advance(match.end())
             self._expect_block_close()
         return Tag(
-            block_type_name=block_type_name,
-            block_name=block_name,
-            start=start_pos,
-            end=self.pos
+            block_type_name=block_type_name, block_name=block_name, start=start_pos, end=self.pos
         )
 
     def find_tags(self):
         while True:
             match = self._first_match(
-                BLOCK_START_PATTERN,
-                COMMENT_START_PATTERN,
-                EXPR_START_PATTERN
+                BLOCK_START_PATTERN, COMMENT_START_PATTERN, EXPR_START_PATTERN
             )
             if match is None:
                 break
@@ -252,9 +245,9 @@ def find_tags(self):
             # start = self.pos
 
             groups = match.groupdict()
-            comment_start = groups.get('comment_start')
-            expr_start = groups.get('expr_start')
-            block_type_name = groups.get('block_type_name')
+            comment_start = groups.get("comment_start")
+            expr_start = groups.get("expr_start")
+            block_type_name = groups.get("block_type_name")
 
             if comment_start is not None:
                 self.handle_comment(match)
@@ -264,8 +257,8 @@ def find_tags(self):
                 yield self.handle_tag(match)
             else:
                 raise dbt.exceptions.InternalException(
-                    'Invalid regex match in next_block, expected block start, '
-                    'expr start, or comment start'
+                    "Invalid regex match in next_block, expected block start, "
+                    "expr start, or comment start"
                 )
 
     def __iter__(self):
@@ -273,21 +266,18 @@ def __iter__(self):
 
 
 duplicate_tags = (
-    'Got nested tags: {outer.block_type_name} (started at {outer.start}) did '
-    'not have a matching {{% end{outer.block_type_name} %}} before a '
-    'subsequent {inner.block_type_name} was found (started at {inner.start})'
+    "Got nested tags: {outer.block_type_name} (started at {outer.start}) did "
+    "not have a matching {{% end{outer.block_type_name} %}} before a "
+    "subsequent {inner.block_type_name} was found (started at {inner.start})"
 )
 
 
 _CONTROL_FLOW_TAGS = {
-    'if': 'endif',
-    'for': 'endfor',
+    "if": "endif",
+    "for": "endfor",
 }
 
-_CONTROL_FLOW_END_TAGS = {
-    v: k
-    for k, v in _CONTROL_FLOW_TAGS.items()
-}
+_CONTROL_FLOW_END_TAGS = {v: k for k, v in _CONTROL_FLOW_TAGS.items()}
 
 
 class BlockIterator:
@@ -310,15 +300,15 @@ def data(self):
 
     def is_current_end(self, tag):
         return (
-            tag.block_type_name.startswith('end') and
-            self.current is not None and
-            tag.block_type_name[3:] == self.current.block_type_name
+            tag.block_type_name.startswith("end")
+            and self.current is not None
+            and tag.block_type_name[3:] == self.current.block_type_name
         )
 
     def find_blocks(self, allowed_blocks=None, collect_raw_data=True):
         """Find all top-level blocks in the data."""
         if allowed_blocks is None:
-            allowed_blocks = {'snapshot', 'macro', 'materialization', 'docs'}
+            allowed_blocks = {"snapshot", "macro", "materialization", "docs"}
 
         for tag in self.tag_parser.find_tags():
             if tag.block_type_name in _CONTROL_FLOW_TAGS:
@@ -329,37 +319,35 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True):
                     found = self.stack.pop()
                 else:
                     expected = _CONTROL_FLOW_END_TAGS[tag.block_type_name]
-                    dbt.exceptions.raise_compiler_error((
-                        'Got an unexpected control flow end tag, got {} but '
-                        'never saw a preceeding {} (@ {})'
-                    ).format(
-                        tag.block_type_name,
-                        expected,
-                        self.tag_parser.linepos(tag.start)
-                    ))
+                    dbt.exceptions.raise_compiler_error(
+                        (
+                            "Got an unexpected control flow end tag, got {} but "
+                            "never saw a preceeding {} (@ {})"
+                        ).format(tag.block_type_name, expected, self.tag_parser.linepos(tag.start))
+                    )
                 expected = _CONTROL_FLOW_TAGS[found]
                 if expected != tag.block_type_name:
-                    dbt.exceptions.raise_compiler_error((
-                        'Got an unexpected control flow end tag, got {} but '
-                        'expected {} next (@ {})'
-                    ).format(
-                        tag.block_type_name,
-                        expected,
-                        self.tag_parser.linepos(tag.start)
-                    ))
+                    dbt.exceptions.raise_compiler_error(
+                        (
+                            "Got an unexpected control flow end tag, got {} but "
+                            "expected {} next (@ {})"
+                        ).format(tag.block_type_name, expected, self.tag_parser.linepos(tag.start))
+                    )
 
             if tag.block_type_name in allowed_blocks:
                 if self.stack:
-                    dbt.exceptions.raise_compiler_error((
-                        'Got a block definition inside control flow at {}. '
-                        'All dbt block definitions must be at the top level'
-                    ).format(self.tag_parser.linepos(tag.start)))
+                    dbt.exceptions.raise_compiler_error(
+                        (
+                            "Got a block definition inside control flow at {}. "
+                            "All dbt block definitions must be at the top level"
+                        ).format(self.tag_parser.linepos(tag.start))
+                    )
                 if self.current is not None:
                     dbt.exceptions.raise_compiler_error(
                         duplicate_tags.format(outer=self.current, inner=tag)
                     )
                 if collect_raw_data:
-                    raw_data = self.data[self.last_position:tag.start]
+                    raw_data = self.data[self.last_position : tag.start]
                     self.last_position = tag.start
                     if raw_data:
                         yield BlockData(raw_data)
@@ -371,23 +359,25 @@ def find_blocks(self, allowed_blocks=None, collect_raw_data=True):
                 yield BlockTag(
                     block_type_name=self.current.block_type_name,
                     block_name=self.current.block_name,
-                    contents=self.data[self.current.end:tag.start],
-                    full_block=self.data[self.current.start:tag.end]
+                    contents=self.data[self.current.end : tag.start],
+                    full_block=self.data[self.current.start : tag.end],
                 )
                 self.current = None
 
         if self.current:
-            linecount = self.data[:self.current.end].count('\n') + 1
-            dbt.exceptions.raise_compiler_error((
-                'Reached EOF without finding a close tag for '
-                '{} (searched from line {})'
-            ).format(self.current.block_type_name, linecount))
+            linecount = self.data[: self.current.end].count("\n") + 1
+            dbt.exceptions.raise_compiler_error(
+                (
+                    "Reached EOF without finding a close tag for " "{} (searched from line {})"
+                ).format(self.current.block_type_name, linecount)
+            )
 
         if collect_raw_data:
-            raw_data = self.data[self.last_position:]
+            raw_data = self.data[self.last_position :]
             if raw_data:
                 yield BlockData(raw_data)
 
     def lex_for_blocks(self, allowed_blocks=None, collect_raw_data=True):
-        return list(self.find_blocks(allowed_blocks=allowed_blocks,
-                                     collect_raw_data=collect_raw_data))
+        return list(
+            self.find_blocks(allowed_blocks=allowed_blocks, collect_raw_data=collect_raw_data)
+        )
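
For context, a usage sketch for this module. It assumes dbt-core is importable and that `BlockIterator` is constructed from a file's raw contents (as its `data` property above suggests); raw text between blocks surfaces as `__dbt__data` entries:

```python
from dbt.clients._jinja_blocks import BlockIterator

source = """
{% macro greet(name) %}
  select '{{ name }}' as greeting
{% endmacro %}
"""

for block in BlockIterator(source).lex_for_blocks(allowed_blocks={"macro"}):
    print(block.block_type_name, getattr(block, "block_name", None))
# __dbt__data None   <- raw text around the block
# macro greet
# __dbt__data None
```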
diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py
index 06ccf5168f6..ceadf0bf9a7 100644
--- a/core/dbt/clients/agate_helper.py
+++ b/core/dbt/clients/agate_helper.py
@@ -10,7 +10,7 @@
 from dbt.exceptions import RuntimeException
 
 
-BOM = BOM_UTF8.decode('utf-8')  # '\ufeff'
+BOM = BOM_UTF8.decode("utf-8")  # '\ufeff'
 
 
 class Number(agate.data_types.Number):
@@ -18,9 +18,7 @@ class Number(agate.data_types.Number):
     # i.e. do not cast True and False to numeric 1 and 0
     def cast(self, d):
         if type(d) == bool:
-            raise agate.exceptions.CastError(
-                'Do not cast True to 1 or False to 0.'
-            )
+            raise agate.exceptions.CastError("Do not cast True to 1 or False to 0.")
         else:
             return super().cast(d)
 
@@ -42,32 +40,24 @@ def cast(self, d):
         except:  # noqa
             pass
 
-        raise agate.exceptions.CastError(
-            'Can not parse value "%s" as datetime.' % d
-        )
+        raise agate.exceptions.CastError('Can not parse value "%s" as datetime.' % d)
 
 
 def build_type_tester(
-    text_columns: Iterable[str],
-    string_null_values: Optional[Iterable[str]] = ('null', '')
+    text_columns: Iterable[str], string_null_values: Optional[Iterable[str]] = ("null", "")
 ) -> agate.TypeTester:
 
     types = [
-        Number(null_values=('null', '')),
-        agate.data_types.Date(null_values=('null', ''),
-                              date_format='%Y-%m-%d'),
-        agate.data_types.DateTime(null_values=('null', ''),
-                                  datetime_format='%Y-%m-%d %H:%M:%S'),
-        ISODateTime(null_values=('null', '')),
-        agate.data_types.Boolean(true_values=('true',),
-                                 false_values=('false',),
-                                 null_values=('null', '')),
-        agate.data_types.Text(null_values=string_null_values)
+        Number(null_values=("null", "")),
+        agate.data_types.Date(null_values=("null", ""), date_format="%Y-%m-%d"),
+        agate.data_types.DateTime(null_values=("null", ""), datetime_format="%Y-%m-%d %H:%M:%S"),
+        ISODateTime(null_values=("null", "")),
+        agate.data_types.Boolean(
+            true_values=("true",), false_values=("false",), null_values=("null", "")
+        ),
+        agate.data_types.Text(null_values=string_null_values),
     ]
-    force = {
-        k: agate.data_types.Text(null_values=string_null_values)
-        for k in text_columns
-    }
+    force = {k: agate.data_types.Text(null_values=string_null_values) for k in text_columns}
     return agate.TypeTester(force=force, types=types)
 
 
@@ -84,10 +74,7 @@ def table_from_rows(
     else:
         # If text_only_columns are present, prevent coercing empty string or
         # literal 'null' strings to a None representation.
-        column_types = build_type_tester(
-            text_only_columns,
-            string_null_values=()
-        )
+        column_types = build_type_tester(text_only_columns, string_null_values=())
 
     return agate.Table(rows, column_names, column_types=column_types)
 
@@ -132,9 +119,7 @@ def table_from_data_flat(data, column_names: Iterable[str]) -> agate.Table:
         rows.append(row)
 
     return table_from_rows(
-        rows=rows,
-        column_names=column_names,
-        text_only_columns=text_only_columns
+        rows=rows, column_names=column_names, text_only_columns=text_only_columns
     )
 
 
@@ -152,7 +137,7 @@ def as_matrix(table):
 
 def from_csv(abspath, text_columns):
     type_tester = build_type_tester(text_columns=text_columns)
-    with open(abspath, encoding='utf-8') as fp:
+    with open(abspath, encoding="utf-8") as fp:
         if fp.read(1) != BOM:
             fp.seek(0)
         return agate.Table.from_csv(fp, column_types=type_tester)
@@ -184,8 +169,8 @@ def __setitem__(self, key, value):
         elif not isinstance(value, type(existing_type)):
             # actual type mismatch!
             raise RuntimeException(
-                f'Tables contain columns with the same names ({key}), '
-                f'but different types ({value} vs {existing_type})'
+                f"Tables contain columns with the same names ({key}), "
+                f"but different types ({value} vs {existing_type})"
             )
 
     def finalize(self) -> Dict[str, agate.data_types.DataType]:
@@ -199,9 +184,7 @@ def finalize(self) -> Dict[str, agate.data_types.DataType]:
         return result
 
 
-def _merged_column_types(
-    tables: List[agate.Table]
-) -> Dict[str, agate.data_types.DataType]:
+def _merged_column_types(tables: List[agate.Table]) -> Dict[str, agate.data_types.DataType]:
     # this is a lot like agate.Table.merge, but with handling for all-null
     # rows being "any type".
     new_columns: ColumnTypeBuilder = ColumnTypeBuilder()
@@ -227,10 +210,7 @@ def merge_tables(tables: List[agate.Table]) -> agate.Table:
 
     rows: List[agate.Row] = []
     for table in tables:
-        if (
-            table.column_names == column_names and
-            table.column_types == column_types
-        ):
+        if table.column_names == column_names and table.column_types == column_types:
             rows.extend(table.rows)
         else:
             for row in table.rows:
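
The `Number` override near the top of this file is worth a standalone look (assumes the agate library is installed). It exists so that Python booleans are rejected rather than silently coerced to 1/0:

```python
import agate


class Number(agate.data_types.Number):
    # same behavior as the agate_helper override above: booleans are not numbers
    def cast(self, d):
        if type(d) == bool:
            raise agate.exceptions.CastError("Do not cast True to 1 or False to 0.")
        else:
            return super().cast(d)


n = Number(null_values=("null", ""))
print(n.cast("3.5"))  # Decimal('3.5')
try:
    n.cast(True)
except agate.exceptions.CastError as err:
    print(err)  # Do not cast True to 1 or False to 0.
```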
diff --git a/core/dbt/clients/git.py b/core/dbt/clients/git.py
index cde9ccc77a4..6d3c484f371 100644
--- a/core/dbt/clients/git.py
+++ b/core/dbt/clients/git.py
@@ -4,13 +4,20 @@
 from dbt.clients.system import run_cmd, rmdir
 from dbt.events.functions import fire_event
 from dbt.events.types import (
-    GitSparseCheckoutSubdirectory, GitProgressCheckoutRevision,
-    GitProgressUpdatingExistingDependency, GitProgressPullingNewDependency,
-    GitNothingToDo, GitProgressUpdatedCheckoutRange, GitProgressCheckedOutAt
+    GitSparseCheckoutSubdirectory,
+    GitProgressCheckoutRevision,
+    GitProgressUpdatingExistingDependency,
+    GitProgressPullingNewDependency,
+    GitNothingToDo,
+    GitProgressUpdatedCheckoutRange,
+    GitProgressCheckedOutAt,
 )
 from dbt.exceptions import (
-    CommandResultError, RuntimeException, bad_package_spec, raise_git_cloning_error,
-    raise_git_cloning_problem
+    CommandResultError,
+    RuntimeException,
+    bad_package_spec,
+    raise_git_cloning_error,
+    raise_git_cloning_problem,
 )
 from packaging import version
 
@@ -21,9 +28,9 @@ def _is_commit(revision: str) -> bool:
 
 
 def _raise_git_cloning_error(repo, revision, error):
-    stderr = error.stderr.decode('utf-8').strip()
-    if 'usage: git' in stderr:
-        stderr = stderr.split('\nusage: git')[0]
+    stderr = error.stderr.decode("utf-8").strip()
+    if "usage: git" in stderr:
+        stderr = stderr.split("\nusage: git")[0]
     if re.match("fatal: destination path '(.+)' already exists", stderr):
         raise_git_cloning_error(error)
 
@@ -34,10 +41,10 @@ def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirec
     has_revision = revision is not None
     is_commit = _is_commit(revision or "")
 
-    clone_cmd = ['git', 'clone', '--depth', '1']
+    clone_cmd = ["git", "clone", "--depth", "1"]
     if subdirectory:
         fire_event(GitSparseCheckoutSubdirectory(subdir=subdirectory))
-        out, _ = run_cmd(cwd, ['git', '--version'], env={'LC_ALL': 'C'})
+        out, _ = run_cmd(cwd, ["git", "--version"], env={"LC_ALL": "C"})
         git_version = version.parse(re.search(r"\d+\.\d+\.\d+", out.decode("utf-8")).group(0))
         if not git_version >= version.parse("2.25.0"):
             # 2.25.0 introduces --sparse
@@ -45,37 +52,37 @@ def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirec
                 "Please update your git version to pull a dbt package "
                 "from a subdirectory: your version is {}, >= 2.25.0 needed".format(git_version)
             )
-        clone_cmd.extend(['--filter=blob:none', '--sparse'])
+        clone_cmd.extend(["--filter=blob:none", "--sparse"])
 
     if has_revision and not is_commit:
-        clone_cmd.extend(['--branch', revision])
+        clone_cmd.extend(["--branch", revision])
 
     clone_cmd.append(repo)
 
     if dirname is not None:
         clone_cmd.append(dirname)
     try:
-        result = run_cmd(cwd, clone_cmd, env={'LC_ALL': 'C'})
+        result = run_cmd(cwd, clone_cmd, env={"LC_ALL": "C"})
     except CommandResultError as exc:
         _raise_git_cloning_error(repo, revision, exc)
 
     if subdirectory:
-        cwd_subdir = os.path.join(cwd, dirname or '')
-        clone_cmd_subdir = ['git', 'sparse-checkout', 'set', subdirectory]
+        cwd_subdir = os.path.join(cwd, dirname or "")
+        clone_cmd_subdir = ["git", "sparse-checkout", "set", subdirectory]
         try:
             run_cmd(cwd_subdir, clone_cmd_subdir)
         except CommandResultError as exc:
             _raise_git_cloning_error(repo, revision, exc)
 
     if remove_git_dir:
-        rmdir(os.path.join(dirname, '.git'))
+        rmdir(os.path.join(dirname, ".git"))
 
     return result
 
 
 def list_tags(cwd):
-    out, err = run_cmd(cwd, ['git', 'tag', '--list'], env={'LC_ALL': 'C'})
-    tags = out.decode('utf-8').strip().split("\n")
+    out, err = run_cmd(cwd, ["git", "tag", "--list"], env={"LC_ALL": "C"})
+    tags = out.decode("utf-8").strip().split("\n")
     return tags
 
 
@@ -87,44 +94,44 @@ def _checkout(cwd, repo, revision):
     if _is_commit(revision):
         run_cmd(cwd, fetch_cmd + [revision])
     else:
-        run_cmd(cwd, ['git', 'remote', 'set-branches', 'origin', revision])
+        run_cmd(cwd, ["git", "remote", "set-branches", "origin", revision])
         run_cmd(cwd, fetch_cmd + ["--tags", revision])
 
     if _is_commit(revision):
         spec = revision
     # Prefer tags to branches if one exists
     elif revision in list_tags(cwd):
-        spec = 'tags/{}'.format(revision)
+        spec = "tags/{}".format(revision)
     else:
-        spec = 'origin/{}'.format(revision)
+        spec = "origin/{}".format(revision)
 
-    out, err = run_cmd(cwd, ['git', 'reset', '--hard', spec],
-                       env={'LC_ALL': 'C'})
+    out, err = run_cmd(cwd, ["git", "reset", "--hard", spec], env={"LC_ALL": "C"})
     return out, err
 
 
 def checkout(cwd, repo, revision=None):
     if revision is None:
-        revision = 'HEAD'
+        revision = "HEAD"
     try:
         return _checkout(cwd, repo, revision)
     except CommandResultError as exc:
-        stderr = exc.stderr.decode('utf-8').strip()
+        stderr = exc.stderr.decode("utf-8").strip()
     bad_package_spec(repo, revision, stderr)
 
 
 def get_current_sha(cwd):
-    out, err = run_cmd(cwd, ['git', 'rev-parse', 'HEAD'], env={'LC_ALL': 'C'})
+    out, err = run_cmd(cwd, ["git", "rev-parse", "HEAD"], env={"LC_ALL": "C"})
 
-    return out.decode('utf-8')
+    return out.decode("utf-8")
 
 
 def remove_remote(cwd):
-    return run_cmd(cwd, ['git', 'remote', 'rm', 'origin'], env={'LC_ALL': 'C'})
+    return run_cmd(cwd, ["git", "remote", "rm", "origin"], env={"LC_ALL": "C"})
 
 
-def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
-                       revision=None, subdirectory=None):
+def clone_and_checkout(
+    repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirectory=None
+):
     exists = None
     try:
         _, err = clone(
@@ -135,7 +142,7 @@ def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
             subdirectory=subdirectory,
         )
     except CommandResultError as exc:
-        err = exc.stderr.decode('utf-8')
+        err = exc.stderr.decode("utf-8")
         exists = re.match("fatal: destination path '(.+)' already exists", err)
         if not exists:
             raise_git_cloning_problem(repo)
@@ -146,11 +153,9 @@ def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
         directory = exists.group(1)
         fire_event(GitProgressUpdatingExistingDependency(dir=directory))
     else:
-        matches = re.match("Cloning into '(.+)'", err.decode('utf-8'))
+        matches = re.match("Cloning into '(.+)'", err.decode("utf-8"))
         if matches is None:
-            raise RuntimeException(
-                f'Error cloning {repo} - never saw "Cloning into ..." from git'
-            )
+            raise RuntimeException(f'Error cloning {repo} - never saw "Cloning into ..." from git')
         directory = matches.group(1)
         fire_event(GitProgressPullingNewDependency(dir=directory))
     full_path = os.path.join(cwd, directory)
@@ -161,9 +166,9 @@ def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
         if start_sha == end_sha:
             fire_event(GitNothingToDo(sha=start_sha[:7]))
         else:
-            fire_event(GitProgressUpdatedCheckoutRange(
-                start_sha=start_sha[:7], end_sha=end_sha[:7]
-            ))
+            fire_event(
+                GitProgressUpdatedCheckoutRange(start_sha=start_sha[:7], end_sha=end_sha[:7])
+            )
     else:
         fire_event(GitProgressCheckedOutAt(end_sha=end_sha[:7]))
-    return os.path.join(directory, subdirectory or '')
+    return os.path.join(directory, subdirectory or "")
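
A condensed sketch of the revision-resolution order in `_checkout` above; the `is_commit` and `tags` parameters here stand in for `_is_commit(revision)` and `list_tags(cwd)`:

```python
def resolve_spec(revision: str, tags: list, is_commit: bool) -> str:
    if is_commit:
        return revision
    # Prefer tags to branches if one exists
    if revision in tags:
        return "tags/{}".format(revision)
    return "origin/{}".format(revision)


print(resolve_spec("1.0.0", tags=["1.0.0"], is_commit=False))  # tags/1.0.0
print(resolve_spec("main", tags=[], is_commit=False))  # origin/main
```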
diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py
index 0ba25237336..ec5eec7bd63 100644
--- a/core/dbt/clients/jinja.py
+++ b/core/dbt/clients/jinja.py
@@ -7,10 +7,7 @@
 from ast import literal_eval
 from contextlib import contextmanager
 from itertools import chain, islice
-from typing import (
-    List, Union, Set, Optional, Dict, Any, Iterator, Type, NoReturn, Tuple,
-    Callable
-)
+from typing import List, Union, Set, Optional, Dict, Any, Iterator, Type, NoReturn, Tuple, Callable
 
 import jinja2
 import jinja2.ext
@@ -20,17 +17,24 @@
 import jinja2.sandbox
 
 from dbt.utils import (
-    get_dbt_macro_name, get_docs_macro_name, get_materialization_macro_name,
-    get_test_macro_name, deep_map_render
+    get_dbt_macro_name,
+    get_docs_macro_name,
+    get_materialization_macro_name,
+    get_test_macro_name,
+    deep_map_render,
 )
 
 from dbt.clients._jinja_blocks import BlockIterator, BlockData, BlockTag
 from dbt.contracts.graph.compiled import CompiledGenericTestNode
 from dbt.contracts.graph.parsed import ParsedGenericTestNode
 from dbt.exceptions import (
-    InternalException, raise_compiler_error, CompilationException,
-    invalid_materialization_argument, MacroReturn, JinjaRenderingException,
-    UndefinedMacroException
+    InternalException,
+    raise_compiler_error,
+    CompilationException,
+    invalid_materialization_argument,
+    MacroReturn,
+    JinjaRenderingException,
+    UndefinedMacroException,
 )
 from dbt import flags
 
@@ -40,27 +44,22 @@ def _linecache_inject(source, write):
         # this is the only reliable way to accomplish this. Obviously, it's
         # really darn noisy and will fill your temporary directory
         tmp_file = tempfile.NamedTemporaryFile(
-            prefix='dbt-macro-compiled-',
-            suffix='.py',
+            prefix="dbt-macro-compiled-",
+            suffix=".py",
             delete=False,
-            mode='w+',
-            encoding='utf-8',
+            mode="w+",
+            encoding="utf-8",
         )
         tmp_file.write(source)
         filename = tmp_file.name
     else:
         # `codecs.encode` actually takes a `bytes` as the first argument if
         # the second argument is 'hex' - mypy does not know this.
-        rnd = codecs.encode(os.urandom(12), 'hex')  # type: ignore
-        filename = rnd.decode('ascii')
+        rnd = codecs.encode(os.urandom(12), "hex")  # type: ignore
+        filename = rnd.decode("ascii")
 
     # put ourselves in the cache
-    cache_entry = (
-        len(source),
-        None,
-        [line + '\n' for line in source.splitlines()],
-        filename
-    )
+    cache_entry = (len(source), None, [line + "\n" for line in source.splitlines()], filename)
     # linecache does in fact have an attribute `cache`, thanks
     linecache.cache[filename] = cache_entry  # type: ignore
     return filename
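
The cache entry built by `_linecache_inject` above follows the stdlib `linecache` tuple format of `(size, mtime, lines, fullname)`. A standalone demo of the same trick, which makes tracebacks and `inspect.getsource` work for generated code (the filename here is hypothetical; any unique string works):

```python
import linecache

source = "def answer():\n    return 42\n"
filename = "dbt-macro-compiled-demo"

# same shape as the cache_entry in the hunk above: (size, mtime, lines, fullname)
linecache.cache[filename] = (
    len(source),
    None,
    [line + "\n" for line in source.splitlines()],
    filename,
)
print(linecache.getline(filename, 2).strip())  # return 42
```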
@@ -73,12 +72,10 @@ def parse_macro(self):
         # modified to fuzz macros defined in the same file. this way
         # dbt can understand the stack of macros being called.
         #  - @cmcarthur
-        node.name = get_dbt_macro_name(
-            self.parse_assign_target(name_only=True).name)
+        node.name = get_dbt_macro_name(self.parse_assign_target(name_only=True).name)
 
         self.parse_signature(node)
-        node.body = self.parse_statements(('name:endmacro',),
-                                          drop_needle=True)
+        node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
         return node
 
 
@@ -94,8 +91,8 @@ def _compile(self, source, filename):
         If the value is 'write', also write the files to disk.
         WARNING: This can write a ton of data if you aren't careful.
         """
-        if filename == '<template>' and flags.MACRO_DEBUGGING: