@classmethod
def from_attributes(cls, attributes: dict[AttributeName, Any]):
    """Construct a TokenMetadata from a plain attribute-name -> value mapping.

    The runtime type of each value decides which attribute family it
    lands in:

        value type      attribute family
        ------------    ----------------
        str             string_attributes
        int | float     numeric_attributes
        datetime        date_attributes (stored as seconds since epoch)

    Attribute names are normalized (lowercased, whitespace-stripped)
    before being used as keys.

    Parameters
    ----------
    attributes : dict[AttributeName, Any]
        Mapping of attribute name to attribute value for the token.

    Returns
    -------
    TokenMetadata
        Token metadata built from the input mapping.

    Raises
    ------
    TypeError
        If a value is not a str, int, float or datetime.
    """
    strings: dict[AttributeName, StringAttribute] = {}
    numerics: dict[AttributeName, NumericAttribute] = {}
    dates: dict[AttributeName, DateAttribute] = {}

    for raw_name, value in attributes.items():
        key = normalize_attribute_string(raw_name)
        if isinstance(value, str):
            strings[key] = StringAttribute(name=key, value=value)
        elif isinstance(value, (float, int)):
            numerics[key] = NumericAttribute(name=key, value=value)
        elif isinstance(value, datetime.datetime):
            dates[key] = DateAttribute(
                name=key, value=int(value.timestamp())
            )
        else:
            raise TypeError(
                f"Provided attribute value has invalid type: {type(value)}. "
                "Must be either str, float, int or datetime."
            )

    return cls(
        string_attributes=strings,
        numeric_attributes=numerics,
        date_attributes=dates,
    )
class TestTokenMetadata:
    def test_from_attributes_valid_types(self):
        """Values route by type: str -> string, int/float -> numeric,
        datetime -> date (stored as an epoch timestamp). Names are
        lowercased and stripped."""
        created = datetime.now()
        metadata = TokenMetadata.from_attributes(
            {
                "hat": "blue cap",
                "created": created,
                "integer trait": 1,
                "float trait": 203.5,
                "PANTS ": "jeans",
            }
        )

        assert metadata.string_attributes == {
            "hat": StringAttribute(name="hat", value="blue cap"),
            # "PANTS " must have been normalized to "pants".
            "pants": StringAttribute(name="pants", value="jeans"),
        }
        assert metadata.numeric_attributes == {
            "integer trait": NumericAttribute(name="integer trait", value=1),
            "float trait": NumericAttribute(name="float trait", value=203.5),
        }
        assert metadata.date_attributes == {
            "created": DateAttribute(
                name="created", value=int(created.timestamp())
            ),
        }

    def test_from_attributes_invalid_type(self):
        """Any unsupported value type (here: a dict) raises TypeError."""
        with pytest.raises(TypeError) as excinfo:
            TokenMetadata.from_attributes(
                {
                    "hat": "blue cap",
                    "created": {"bad input": "true"},
                    "integer trait": 1,
                    "float trait": 203.5,
                }
            )

        assert "Provided attribute value has invalid type" in str(
            excinfo.value
        )
def __init__(
    self,
    tokens: list[Token],
    attributes_frequency_counts: dict[
        AttributeName, dict[AttributeValue, int]
    ]
    | None = None,
    name: str | None = "",
):
    """
    Parameters
    ----------
    tokens : list[Token]
        List of all tokens that belong to the collection. Tokens must have
        metadata properly set if attributes_frequency_counts is not provided.
    attributes_frequency_counts:
        dict[AttributeName, dict[AttributeValue, int]] | None, optional
        Dictionary of attributes to the number of tokens in this collection
        that has a specific value for every possible value for the given
        attribute, by default None.
        If not provided, the attributes distribution will be derived from the
        attributes on the tokens provided.

        Example:
        {"hair": {"brown": 500, "blonde": 100}
        which means 500 tokens have hair=brown, 100 tokens have hair=blonde.

        Note: All trait names and string values should be lowercased and
        stripped of leading and trailing whitespace.
        Note 2: We currently only support string attributes in
        attributes_frequency_counts.
    name : str | None, optional
        A reference string only used for debugging or identification,
        by default "".
    """
    self._tokens = tokens
    self.name = name or ""
    if attributes_frequency_counts:
        # Caller supplied the distribution: just normalize its keys/values.
        self.attributes_frequency_counts = (
            self._normalize_attributes_frequency_counts(
                attributes_frequency_counts
            )
        )
    else:
        # Otherwise derive the distribution from the tokens' metadata.
        self.attributes_frequency_counts = (
            self._derive_normalized_attributes_frequency_counts()
        )
+ """ + attrs_freq_counts: dict[ + AttributeName, dict[AttributeValue, int] + ] = defaultdict(dict) + + for token in self._tokens: + for ( + attr_name, + str_attr, + ) in token.metadata.string_attributes.items(): + normalized_name = normalize_attribute_string(attr_name) + normalized_value = normalize_attribute_string(str_attr.value) + if normalized_value not in attrs_freq_counts[attr_name]: + attrs_freq_counts[normalized_name][normalized_value] = 1 + else: + attrs_freq_counts[normalized_name][normalized_value] += 1 + + return dict(attrs_freq_counts) + def __str__(self) -> str: return f"Collection[{self.name}]" diff --git a/tests/models/test_collection.py b/tests/models/test_collection.py index e3d6b6c..e3ef1eb 100644 --- a/tests/models/test_collection.py +++ b/tests/models/test_collection.py @@ -2,10 +2,15 @@ from open_rarity.models.token import Token from open_rarity.models.token_metadata import ( StringAttribute, + TokenMetadata, ) from open_rarity.models.token_standard import TokenStandard -from tests.helpers import create_evm_token, create_numeric_evm_token +from tests.helpers import ( + create_evm_token, + create_numeric_evm_token, + generate_mixed_collection, +) class TestCollection: @@ -190,6 +195,63 @@ def test_has_numeric_attributes(self): assert self.test_numeric_attributes_collection.has_numeric_attribute assert not self.test_collection.has_numeric_attribute + def test_collection_without_attributes_init(self): + collection = Collection( + tokens=[ + create_evm_token( + token_id=1, + metadata=TokenMetadata.from_attributes( + { + "hat": "cap", + "bottom": "jeans", + "something another": "special", + } + ), + ), + create_evm_token( + token_id=2, + metadata=TokenMetadata.from_attributes( + { + "hat": "cap", + "bottom": "pjs", + "something another": "not special", + } + ), + ), + create_evm_token( + token_id=2, + metadata=TokenMetadata.from_attributes( + { + "hat": "bucket hat", + "new": "very special", + "integer trait - will not be shown": 1, + } + ), + ), 
# Methods of tests/models/test_collection.py::TestCollection.

def test_collection_without_attributes_init(self):
    """attributes_frequency_counts is derived from token metadata when it
    is not passed to the constructor; numeric traits are excluded."""
    collection = Collection(
        tokens=[
            create_evm_token(
                token_id=1,
                metadata=TokenMetadata.from_attributes(
                    {
                        "hat": "cap",
                        "bottom": "jeans",
                        "something another": "special",
                    }
                ),
            ),
            create_evm_token(
                token_id=2,
                metadata=TokenMetadata.from_attributes(
                    {
                        "hat": "cap",
                        "bottom": "pjs",
                        "something another": "not special",
                    }
                ),
            ),
            # BUG FIX: this token previously reused token_id=2; every
            # token in the fixture should have a unique id.
            create_evm_token(
                token_id=3,
                metadata=TokenMetadata.from_attributes(
                    {
                        "hat": "bucket hat",
                        "new": "very special",
                        "integer trait - will not be shown": 1,
                    }
                ),
            ),
        ]
    )

    assert collection.attributes_frequency_counts == {
        "hat": {
            "cap": 2,
            "bucket hat": 1,
        },
        "bottom": {
            "jeans": 1,
            "pjs": 1,
        },
        "something another": {"special": 1, "not special": 1},
        "new": {"very special": 1},
    }


def test_collection_without_attributes_init_equality(self):
    """Counts derived from a large generated collection must match the
    counts the generator computed itself."""
    large_collection = generate_mixed_collection()
    comparable_collection = Collection(tokens=large_collection.tokens)
    assert (
        comparable_collection.attributes_frequency_counts
        == large_collection.attributes_frequency_counts
    )
@dataclass
class Token:
    """Represents a single token on-chain.

    Attributes
    ----------
    token_identifier : TokenIdentifier
        Identifier (e.g. contract address + token id) for the token.
    token_standard : TokenStandard
        The token standard (e.g. ERC721) the token conforms to.
    metadata : TokenMetadata
        The token's attribute metadata. Normalization of attribute
        names/values is handled by TokenMetadata itself, not here.
    """

    token_identifier: TokenIdentifier
    token_standard: TokenStandard
    metadata: TokenMetadata

    def __str__(self):
        return f"Token[{self.token_identifier}]"
@dataclass
class TokenMetadata:
    """Represents the metadata of a single token.

    Attributes
    ----------
    string_attributes : dict
        mapping of attribute name to string attribute
    numeric_attributes : dict
        mapping of attribute name to numeric attribute
    date_attributes : dict
        mapping of attribute name to date attribute

    All attribute names are normalized, and string attribute values are
    normalized the same way — lowercased with leading/trailing whitespace
    stripped.
    """

    string_attributes: dict[AttributeName, StringAttribute] = field(
        default_factory=dict
    )
    numeric_attributes: dict[AttributeName, NumericAttribute] = field(
        default_factory=dict
    )
    date_attributes: dict[AttributeName, DateAttribute] = field(
        default_factory=dict
    )

    def __post_init__(self):
        # Normalize every attribute dictionary's keys so lookups behave
        # consistently regardless of the caller's casing/whitespace.
        self.string_attributes = self._normalize_attributes_dict(
            self.string_attributes
        )
        self.numeric_attributes = self._normalize_attributes_dict(
            self.numeric_attributes
        )
        self.date_attributes = self._normalize_attributes_dict(
            self.date_attributes
        )

    def _normalize_attributes_dict(self, attributes_dict: dict) -> dict:
        """Return a copy of ``attributes_dict`` keyed by normalized
        (lowercased, whitespace-stripped) attribute names, keeping each
        attribute object's own ``name`` field in sync with its key."""
        normalized = {}
        for raw_name, attribute in attributes_dict.items():
            canonical = normalize_attribute_string(raw_name)
            normalized[canonical] = attribute
            if attribute.name != canonical:
                attribute.name = canonical
        return normalized

    @classmethod
    def from_attributes(cls, attributes: dict[AttributeName, Any]):
        """Constructs TokenMetadata from an attributes dictionary.

        The runtime type of each value decides the attribute family:

            value type      attribute family
            ------------    ----------------
            str             string attribute
            int | float     numeric attribute
            datetime        date attribute (stored as seconds from epoch)

        Normalization of names/values happens in the attribute
        constructors and in ``__post_init__``, so no explicit
        normalization is needed here.

        Returns
        -------
        TokenMetadata
            token metadata built from the input

        Raises
        ------
        TypeError
            If a value is not a str, int, float or datetime.
        """
        strings: dict = {}
        numerics: dict = {}
        dates: dict = {}
        for attr_name, attr_value in attributes.items():
            if isinstance(attr_value, str):
                strings[attr_name] = StringAttribute(
                    name=attr_name, value=attr_value
                )
            elif isinstance(attr_value, (float, int)):
                numerics[attr_name] = NumericAttribute(
                    name=attr_name, value=attr_value
                )
            elif isinstance(attr_value, datetime.datetime):
                dates[attr_name] = DateAttribute(
                    name=attr_name,
                    value=int(attr_value.timestamp()),
                )
            else:
                raise TypeError(
                    f"Provided attribute value has invalid type: {type(attr_value)}. "
                    "Must be either str, float, int or datetime."
                )

        return cls(
            string_attributes=strings,
            numeric_attributes=numerics,
            date_attributes=dates,
        )
def _normalize_attributes_frequency_counts(
    self,
    attributes_frequency_counts: dict[
        AttributeName, dict[AttributeValue, int]
    ],
) -> dict[AttributeName, dict[AttributeValue, int]]:
    """We normalize all collection attributes to ensure that neither casing
    nor leading/trailing spaces produce different attributes:
    (e.g. 'Hat' == 'hat' == 'hat ')
    If a collection has the following in their attributes frequency counts:
    ('Hat', 'beanie') 5 tokens and
    ('hat', 'beanie') 10 tokens
    this would produce: ('hat', 'beanie') 15 tokens
    """
    merged: dict[AttributeName, dict[AttributeValue, int]] = {}
    for raw_name, value_counts in attributes_frequency_counts.items():
        clean_name = normalize_attribute_string(raw_name)
        bucket = merged.setdefault(clean_name, {})
        for raw_value, count in value_counts.items():
            # Only string values are normalized; other types pass through.
            clean_value = (
                normalize_attribute_string(raw_value)
                if isinstance(raw_value, str)
                else raw_value
            )
            # Variants that collapse to the same key have counts summed.
            bucket[clean_value] = bucket.get(clean_value, 0) + count
    return merged
@classmethod
def from_erc721(
    cls,
    contract_address: str,
    token_id: int,
    metadata_dict: dict[AttributeName, Any],
):
    """Creates a Token representing an ERC721 EVM token.

    Parameters
    ----------
    contract_address : str
        Contract address of the token
    token_id : int
        Token ID number of the token
    metadata_dict : dict
        Dictionary of attribute name to attribute value for the token.
        The type of the value determines whether the attribute is a
        string, numeric or date attribute:

            value type      attribute family
            ------------    ----------------
            str             string attribute
            int | float     numeric attribute
            datetime        date attribute (stored as seconds from epoch)

    Returns
    -------
    Token
        A Token with an EVMContractTokenIdentifier and the ERC721 standard.
    """
    identifier = EVMContractTokenIdentifier(
        contract_address=contract_address, token_id=token_id
    )
    return cls(
        token_identifier=identifier,
        token_standard=TokenStandard.ERC721,
        metadata=TokenMetadata.from_attributes(metadata_dict),
    )
contract_address="0xa3049...", token_id=1 - ), - token_standard=TokenStandard.ERC721, - metadata=TokenMetadata( - string_attributes={ - "hat": StringAttribute(name="hat", value="cap"), - "shirt": StringAttribute(name="shirt", value="blue"), - } - ), + Token.from_erc721( + contract_address="0xa3049...", + token_id=1, + metadata_dict={"hat": "cap", "shirt": "blue"}, ), - Token( - token_identifier=EVMContractTokenIdentifier( - contract_address="0xa3049...", token_id=2 - ), - token_standard=TokenStandard.ERC721, - metadata=TokenMetadata( - string_attributes={ - "hat": StringAttribute(name="hat", value="visor"), - "shirt": StringAttribute(name="shirt", value="green"), - } - ), + Token.from_erc721( + contract_address="0xa3049...", + token_id=2, + metadata_dict={"hat": "visor", "shirt": "green"}, ), - Token( - token_identifier=EVMContractTokenIdentifier( - contract_address="0xa3049...", token_id=3 - ), - token_standard=TokenStandard.ERC721, - metadata=TokenMetadata( - string_attributes={ - "hat": StringAttribute(name="hat", value="visor"), - "shirt": StringAttribute(name="shirt", value="blue"), - } - ), + Token.from_erc721( + contract_address="0xa3049...", + token_id=3, + metadata_dict={"hat": "visor", "shirt": "blue"}, ), ], ) # Replace inputs with your collection-specific details here diff --git a/tests/models/test_token.py b/tests/models/test_token.py index 51873a4..761f438 100644 --- a/tests/models/test_token.py +++ b/tests/models/test_token.py @@ -1,13 +1,42 @@ +from open_rarity.models.token import Token +from open_rarity.models.token_identifier import EVMContractTokenIdentifier from open_rarity.models.token_metadata import ( NumericAttribute, StringAttribute, TokenMetadata, ) +from open_rarity.models.token_standard import TokenStandard from tests.helpers import create_evm_token class TestToken: + def test_create_erc721(self): + token = Token( + token_identifier=EVMContractTokenIdentifier( + contract_address="0xa3049...", token_id=1 + ), + 
token_standard=TokenStandard.ERC721, + metadata=TokenMetadata.from_attributes( + {"hat": "cap", "shirt": "blue"} + ), + ) + token_equal = Token.from_erc721( + contract_address="0xa3049...", + token_id=1, + metadata_dict={"hat": "cap", "shirt": "blue"}, + ) + + assert token == token_equal + + token_not_equal = Token.from_erc721( + contract_address="0xmew...", + token_id=1, + metadata_dict={"hat": "cap", "shirt": "blue"}, + ) + + assert token != token_not_equal + def test_token_init_metadata_non_matching_attribute_names(self): token = create_evm_token( token_id=1, From 3075be9abf3cde44b11f4d863d444a4192d57981 Mon Sep 17 00:00:00 2001 From: vickygos <93538907+vickygos@users.noreply.github.com> Date: Wed, 14 Sep 2022 13:55:11 -0700 Subject: [PATCH 7/7] Update score_generated_collection.py --- scripts/score_generated_collection.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/scripts/score_generated_collection.py b/scripts/score_generated_collection.py index d0fb490..9d8af13 100644 --- a/scripts/score_generated_collection.py +++ b/scripts/score_generated_collection.py @@ -3,6 +3,7 @@ OpenRarityScorer, Token, ) +from open_rarity.rarity_ranker import RarityRanker if __name__ == "__main__": scorer = OpenRarityScorer() @@ -37,4 +38,10 @@ token = collection.tokens[0] # Your token details filled in token_score = scorer.score_token(collection=collection, token=token) - print(f"Token score: {token_score}") + # Better yet.. just use ranker directly! + ranked_tokens = RarityRanker.rank_collection(collection=collection) + for ranked_token in ranked_tokens: + print( + f"Token {ranked_token.token} has rank {ranked_token.rank} " + "and score {ranked_token.score}" + )