Make mypy pass #92

Merged · 5 commits · Oct 27, 2022

1 change: 1 addition & 0 deletions .flake8
@@ -1,5 +1,6 @@
 [flake8]
 max-line-length = 88
 extend-ignore = E203
 exclude =
     __init__.py
+    __pycache__
3 changes: 3 additions & 0 deletions .github/workflows/tests.yaml
@@ -20,5 +20,8 @@ jobs:
       - name: Code Formatting
         uses: psf/black@stable
 
+      - name: Type Check
+        run: poetry run mypy open_rarity
+
       - name: Unit tests
         run: poetry run pytest --cov-config=.coveragerc --cov=open_rarity tests/
12 changes: 12 additions & 0 deletions .pre-commit-config.yaml
@@ -35,3 +35,15 @@ repos:
     hooks:
       - id: flake8
         additional_dependencies: [flake8-bugbear, pep8-naming]
+
+  # Mypy
+  # Run locally to ensure type checks pass before committing
+  - repo: local
+    hooks:
+      - id: mypy
+        name: mypy
+        types: [python]
+        entry: poetry run mypy open_rarity
+        language: system
+        always_run: true
+        pass_filenames: false
4 changes: 2 additions & 2 deletions open_rarity/models/token.py
@@ -1,5 +1,5 @@
 from dataclasses import dataclass
-from typing import Any
+from typing import Any, Iterable
 
 from open_rarity.models.token_identifier import (
     EVMContractTokenIdentifier,
@@ -125,7 +125,7 @@ def has_attribute(self, attribute_name: str) -> bool:
     def trait_count(self) -> int:
         """Returns the count of non-null, non-"none" value traits this token has."""
 
-        def get_attributes_count(attributes: list[Attribute]) -> int:
+        def get_attributes_count(attributes: Iterable[Attribute]) -> int:
             return sum(
                 map(
                     lambda a: (
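
Note on this hunk: the helper is presumably called with dict views of the token's attributes, which mypy types as views rather than `list`, so widening the parameter to `Iterable[Attribute]` is what makes the call sites check. A minimal, self-contained sketch of the pattern (class and values are illustrative, not from the PR):

```python
from dataclasses import dataclass
from typing import Iterable


@dataclass
class Attribute:
    name: str
    value: str


def count_non_none(attributes: Iterable[Attribute]) -> int:
    # Accepts any iterable: a list, a tuple, or a dict view.
    return sum(1 for a in attributes if a.value.lower() != "none")


attrs = {"hat": Attribute("hat", "cap"), "bg": Attribute("bg", "none")}
# dict.values() is a ValuesView, not a list; with a list[Attribute]
# parameter annotation, mypy would flag this call as an incompatible
# argument type.
print(count_non_none(attrs.values()))  # 1
```
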
8 changes: 6 additions & 2 deletions open_rarity/models/token_identifier.py
@@ -1,5 +1,5 @@
 from dataclasses import dataclass
-from typing import Annotated, Literal
+from typing import Annotated, Literal, Type, TypeAlias, Union
 
 from pydantic import Field
 
@@ -66,8 +66,12 @@ def to_dict(self) -> dict:
     Field(discriminator="identifier_type"),
 ]
 
+TokenIdentifierClass: TypeAlias = Union[
+    Type[EVMContractTokenIdentifier], Type[SolanaMintAddressTokenIdentifier]
+]
+
 
-def get_identifier_class_from_dict(data_dict: dict) -> TokenIdentifier:
+def get_identifier_class_from_dict(data_dict: dict) -> TokenIdentifierClass:
     return (
         EVMContractTokenIdentifier
         if "token_id" in data_dict
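
Note: the function returns one of the identifier *classes*, not an instance, so the old `-> TokenIdentifier` annotation was wrong; the new alias is a union of `Type[...]` objects. A hedged sketch of the same idea with stand-in classes:

```python
from dataclasses import dataclass
from typing import Type, TypeAlias, Union


@dataclass
class EVMId:  # stand-in for EVMContractTokenIdentifier
    contract_address: str
    token_id: int


@dataclass
class SolanaId:  # stand-in for SolanaMintAddressTokenIdentifier
    mint_address: str


IdClass: TypeAlias = Union[Type[EVMId], Type[SolanaId]]


def pick_class(data: dict) -> IdClass:
    # Returns the class object itself; annotating the return as the
    # *instance* union would make mypy reject both branches.
    return EVMId if "token_id" in data else SolanaId


print(pick_class({"token_id": 1}).__name__)  # EVMId
```
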
16 changes: 9 additions & 7 deletions open_rarity/models/token_metadata.py
@@ -163,13 +163,15 @@ class attribute type
 
     def to_attributes(self) -> dict[AttributeName, Any]:
         """Returns a dictionary of all attributes in this metadata object."""
-        attributes = {}
-        for attr in self.string_attributes.values():
-            attributes[attr.name] = attr.value
-        for attr in self.numeric_attributes.values():
-            attributes[attr.name] = attr.value
-        for attr in self.date_attributes.values():
-            attributes[attr.name] = datetime.fromtimestamp(attr.value)
+        attributes: dict[AttributeName, Any] = {}
+        for str_attr in self.string_attributes.values():
+            attributes[str_attr.name] = str_attr.value
+        for num_attr in self.numeric_attributes.values():
+            attributes[num_attr.name] = num_attr.value
+        for date_attr in self.date_attributes.values():
+            attributes[date_attr.name] = datetime.datetime.fromtimestamp(
+                date_attr.value
+            )
         return attributes
 
     def add_attribute(self, attribute: Attribute):
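
Note: the renames are not cosmetic. mypy infers a loop variable's type from its first binding, so reusing one `attr` across loops over differently-typed dicts triggers an incompatible-assignment error (absent `--allow-redefinition`). A reduced illustration with stand-in attribute classes:

```python
from datetime import datetime


class StringAttribute:
    def __init__(self, name: str, value: str) -> None:
        self.name, self.value = name, value


class DateAttribute:
    def __init__(self, name: str, value: float) -> None:
        self.name, self.value = name, value


string_attributes = {"hat": StringAttribute("hat", "cap")}
date_attributes = {"minted": DateAttribute("minted", 1666828800.0)}

attributes: dict[str, object] = {}
for str_attr in string_attributes.values():
    attributes[str_attr.name] = str_attr.value
# Reusing the same name for this second loop would make mypy report
# 'Incompatible types in assignment', since the variable was already
# inferred as StringAttribute above.
for date_attr in date_attributes.values():
    attributes[date_attr.name] = datetime.fromtimestamp(date_attr.value)
print(attributes)
```
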
1 change: 1 addition & 0 deletions open_rarity/rarity_ranker.py
@@ -116,6 +116,7 @@ def set_rarity_ranks(
             prev_token_rarity = sorted_token_rarities[i - 1]
             scores_equal = math.isclose(token_rarity.score, prev_token_rarity.score)
             if scores_equal:
+                assert prev_token_rarity.rank is not None
                 rank = prev_token_rarity.rank
 
             token_rarity.rank = rank
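
Note: `rank` on `TokenRarity` is presumably `Optional`, so reading the previous token's rank needs narrowing; the `assert ... is not None` tells mypy the value is an `int` here (every earlier token in the sorted list has already been assigned a rank). The narrowing pattern in isolation:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class TokenRarity:  # reduced stand-in, not the library class
    score: float
    rank: Optional[int] = None


prev = TokenRarity(score=0.9, rank=1)
# Without the assert, `rank: int = prev.rank` fails mypy because
# prev.rank is Optional[int]; the assert narrows it to int.
assert prev.rank is not None
rank: int = prev.rank
print(rank)  # 1
```
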
@@ -147,9 +147,7 @@ def _add_rarity_data(
         # If we didn't want to load cache or cache is empty, pull data from API
         if not self._is_cache_loaded(slug, rank_provider):
             resolver = get_external_resolver(rank_provider)
-            token_ids_to_ranks = resolver.get_all_ranks(
-                contract_address=contract_address
-            )
+            token_ids_to_ranks = resolver.get_all_ranks(contract_address)
             self._set_cache(
                 slug=slug, rank_provider=rank_provider, rank_data=token_ids_to_ranks
             )
@@ -228,9 +226,10 @@ def _add_rarity_sniper_rarity_data(
         )
 
         # Store in cache
-        self._get_cache_for_collection(opensea_slug, rank_provider)[
-            str(token_id)
-        ] = rank
+        if rank is not None:
+            self._get_cache_for_collection(opensea_slug, rank_provider)[
+                str(token_id)
+            ] = rank
 
         if rank:
             token_with_rarity.rarities.append(
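
Note: this guard is a behavior fix surfaced by typing. `rank` likely comes from a `get_rank(...) -> int | None` resolver, while the per-collection cache maps token ids to `int`, so the old code could cache a `None`. A reduced version (the cache shape is an assumption):

```python
def store_rank(cache: dict[str, int], token_id: int, rank: int | None) -> None:
    # mypy rejects an unguarded `cache[...] = rank`: the dict's value
    # type is int, but rank may be None. The guard both satisfies the
    # checker and keeps None out of the cache.
    if rank is not None:
        cache[str(token_id)] = rank


cache: dict[str, int] = {}
store_rank(cache, 1234, 17)
store_rank(cache, 5678, None)  # skipped instead of cached as None
print(cache)  # {'1234': 17}
```
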
2 changes: 1 addition & 1 deletion open_rarity/resolver/rarity_providers/rank_resolver.py
@@ -6,5 +6,5 @@
 class RankResolver(Protocol):
     @staticmethod
     @abstractmethod
-    def get_all_ranks(contract_address: str | None, slug: str | None) -> dict[str, int]:
+    def get_all_ranks(contract_address_or_slug: str) -> dict[str, int]:
         raise NotImplementedError
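
Note: the old two-optional-parameter signature did not match how implementers are actually called (a single positional argument, as in the `resolver.get_all_ranks(contract_address)` change above). With a `Protocol`, mypy checks conformance structurally at the call site. A small sketch (the implementer is hypothetical):

```python
from abc import abstractmethod
from typing import Protocol


class RankResolver(Protocol):
    @staticmethod
    @abstractmethod
    def get_all_ranks(contract_address_or_slug: str) -> dict[str, int]:
        raise NotImplementedError


class DummyResolver:  # hypothetical implementer, not from the PR
    @staticmethod
    def get_all_ranks(contract_address_or_slug: str) -> dict[str, int]:
        return {"1": 1}


def fetch_ranks(resolver: RankResolver, key: str) -> dict[str, int]:
    return resolver.get_all_ranks(key)


# DummyResolver satisfies the protocol structurally; a static method
# with a different parameter list would fail mypy at this call.
print(fetch_ranks(DummyResolver(), "0xabc"))  # {'1': 1}
```
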
2 changes: 1 addition & 1 deletion open_rarity/resolver/rarity_providers/trait_sniper.py
@@ -135,7 +135,7 @@ def get_rank(collection_slug: str, token_id: int) -> int | None:
     """
     # TODO [vicky]: In future, we can add retry mechanisms if needed
 
-    querystring = {
+    querystring: dict[str, str | int] = {
         "trait_norm": "true",
         "trait_count": "true",
         "token_id": token_id,
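
Note: left unannotated, mypy infers the dict literal's value type by joining `str` and `int` (to `object`), which can then fail wherever a more precise value type is expected. Declaring `dict[str, str | int]` up front pins the intent. A reduced example:

```python
token_id = 1234

# Annotating the literal pins the value type. Unannotated, mypy joins
# the entry types (here to `object`), and later str | int uses or
# inserts may no longer type-check as intended.
querystring: dict[str, str | int] = {
    "trait_norm": "true",
    "trait_count": "true",
    "token_id": token_id,
}
querystring["limit"] = 50  # OK: int is in the declared value union
print(querystring)
```
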
34 changes: 22 additions & 12 deletions open_rarity/resolver/testset_resolver.py
@@ -8,6 +8,7 @@
 import pkgutil
 from dataclasses import dataclass
 from time import strftime, time
+from typing import Iterable
 
 import numpy as np
 
@@ -174,7 +175,10 @@ def get_tokens_with_rarity(
 
     t1_start = time()
 
-    for batch_id, tokens_batch in enumerate(np.array_split(tokens, num_batches)):
+    tokens_batch: Iterable[Token]
+    for batch_id, tokens_batch in enumerate(
+        np.array_split(tokens, num_batches)  # type: ignore
+    ):
         message = (
             f"\tStarting batch {batch_id} for collection "
             f"{slug}: Processing {len(tokens_batch)} tokens. "
@@ -259,7 +263,7 @@ def resolve_collection_data(
 
     data = json.load(io.BytesIO(golden_collections))
     print("------------------------------")
-    slugs_to_rows = {}
+    output_rows: list = []
     for collection_def in data:
         start_time = time()
         opensea_slug = collection_def["collection_slug"]
@@ -308,11 +312,12 @@ def resolve_collection_data(
             tokens_with_rarity=tokens_with_rarity,
             dry_run=not output_file_to_disk,
         )
-        slugs_to_rows[opensea_slug] = rows
+        if rows is not None:
+            output_rows += rows
         time_elapsed = round(time() - start_time)
         print(f"FINISHED: Resolved collection: {opensea_slug} in {time_elapsed} secs")
 
-    return slugs_to_rows if not output_file_to_disk else None
+    return output_rows if not output_file_to_disk else None
 
 
 def augment_with_open_rarity_scores(
@@ -354,29 +359,34 @@ def augment_with_open_rarity_scores(
 )
 
 
-def extract_rank(tokens_to_score: dict[int, TokenRarity]) -> RankedTokens:
+def extract_rank(tokens_to_score: dict[str, TokenRarity]) -> RankedTokens:
     """Sorts dictionary by float score and extract rank according to the score
 
     Parameters
     ----------
-    token_id_to_scores : dict[int, TokenRarity]
+    token_id_to_scores : dict[str, TokenRarity]
         dictionary of token_id_to_scores with token_id to score mapping
 
     Returns
     -------
-    dict[int, RankScore]
+    dict[str, RankScore]
         dictionary of token to rank, score pair
     """
     ranked_tokens: list[TokenRarity] = RarityRanker.set_rarity_ranks(
-        token_rarities=tokens_to_score.values()
+        token_rarities=list(tokens_to_score.values())
     )
-    return {
-        int(token.token.token_identifier.token_id): (
+
+    result = {}
+    for token in ranked_tokens:
+        assert token.rank
+        # note: this is a bug, ignoring the mypy error for now
+        # token_identifier can be of type SolanaMintAddressTokenIdentifier
+        # which has no token_id
+        result[int(token.token.token_identifier.token_id)] = (  # type: ignore
             token.rank,
             token.score,
         )
-        for token in ranked_tokens
-    }
+    return result
 
 
 def resolve_open_rarity_score(
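
Note: the inline comments are candid that this `# type: ignore` papers over a real bug: `token.token.token_identifier` is a discriminated union, and the Solana variant has no `token_id`, so the attribute access only holds for the EVM case. The underlying mypy complaint, reduced to a stand-alone sketch:

```python
from dataclasses import dataclass
from typing import Union


@dataclass
class EVMContractTokenIdentifier:
    token_id: int


@dataclass
class SolanaMintAddressTokenIdentifier:
    mint_address: str


TokenId = Union[EVMContractTokenIdentifier, SolanaMintAddressTokenIdentifier]


def extract_id(identifier: TokenId) -> int:
    # mypy: Item "SolanaMintAddressTokenIdentifier" of the union has no
    # attribute "token_id" -- hence the PR's `# type: ignore`, flagged
    # as a known bug rather than a fix.
    return identifier.token_id  # type: ignore


print(extract_id(EVMContractTokenIdentifier(token_id=7)))  # 7
```
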