Merge pull request #83 from OpenRarity/vicky/small-fixes
Small fixes
vickygos authored Oct 14, 2022
2 parents 92bae2d + ac864c3 commit fd351f8
Showing 3 changed files with 132 additions and 14 deletions.
4 changes: 3 additions & 1 deletion open_rarity/resolver/opensea_api_helpers.py
@@ -1,6 +1,7 @@
import json
import logging
import math
import os

import requests
from requests.models import HTTPError
@@ -25,7 +26,8 @@

HEADERS = {
"Accept": "application/json",
"X-API-KEY": "",
# Note: API key not required but will help rate limiting
"X-API-KEY": os.environ.get("OS_API_KEY") or "",
}

# https://docs.opensea.io/docs/metadata-standards
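The change above swaps the hard-coded empty "X-API-KEY" for an environment lookup, so a key can be supplied without editing source. A minimal sketch of how that header would flow into a request, assuming OS_API_KEY is exported in the shell; the endpoint URL below is illustrative and not taken from this diff:

import os

import requests

HEADERS = {
    "Accept": "application/json",
    # Falls back to an empty string when OS_API_KEY is unset; keyless
    # requests still work but are rate limited more aggressively.
    "X-API-KEY": os.environ.get("OS_API_KEY") or "",
}

# Illustrative URL only; the real helper builds its own endpoints.
response = requests.get(
    "https://api.opensea.io/api/v1/collection/boredapeyachtclub",
    headers=HEADERS,
)
response.raise_for_status()

Exporting the key (export OS_API_KEY=<your key>) before running the resolver is enough; no code change is needed.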
1 change: 1 addition & 0 deletions open_rarity/resolver/rarity_providers/trait_sniper.py
@@ -45,6 +45,7 @@ def get_all_ranks(contract_address: str) -> dict[str, int]:
return {
str(rank_data["token_id"]): int(rank_data["rarity_rank"])
for rank_data in all_rank_data
if rank_data["rarity_rank"]
}

@staticmethod
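The single added line above guards the dict comprehension so tokens whose rarity_rank comes back missing or null are dropped rather than passed to int(). A standalone sketch of that behavior, with made-up sample data:

# Made-up sample of provider rank data.
all_rank_data = [
    {"token_id": "1", "rarity_rank": 120},
    {"token_id": "2", "rarity_rank": None},  # dropped by the new guard
    {"token_id": "3", "rarity_rank": 47},
]

ranks = {
    str(rank_data["token_id"]): int(rank_data["rarity_rank"])
    for rank_data in all_rank_data
    if rank_data["rarity_rank"]
}

assert ranks == {"1": 120, "3": 47}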
141 changes: 128 additions & 13 deletions tests/resolver/test_testset_resolver.py
@@ -1,3 +1,5 @@
import csv

import pytest

from open_rarity.resolver.models.token_with_rarity_data import RankProvider
@@ -42,47 +44,54 @@ class TestTestsetResolver:
},
}

EXPECTED_COLUMNS = [
"slug",
"token_id",
"traits_sniper",
"rarity_sniffer",
"rarity_sniper",
"arithmetic",
"geometric",
"harmonic",
"sum",
"information_content",
]

@pytest.mark.skipif(
"not config.getoption('--run-resolvers')",
reason="This tests runs too long to have as part of CI/CD but should be "
"run whenver someone changes resolver",
)
def test_resolve_collection_data(self):
def test_resolve_collection_data_two_providers(self):
# Have the resolver pull in BAYC rarity rankings from various sources
# Just do a check to ensure the ranks from different providers are
# as expected
resolve_collection_data(
resolve_remote_rarity=True,
package_path="tests",
# We exclude trait sniper due to API key requirements
external_rank_providers=[
RankProvider.RARITY_SNIFFER,
RankProvider.RARITY_SNIPER,
],
filename="resolver/sample_files/bayc.json",
# max_tokens_to_calculate=100,
)
# Read the file and verify column values are as expected for the given tokens
output_filename = "testset_boredapeyachtclub.csv"
import csv

rows = 0
with open(output_filename) as csvfile:
resolver_output_reader = csv.reader(csvfile)
for idx, row in enumerate(resolver_output_reader):
rows += 1
if idx == 0:
assert row[0] == "slug"
assert row[1] == "token_id"
assert row[2] == "traits_sniper"
assert row[3] == "rarity_sniffer"
assert row[4] == "rarity_sniper"
assert row[5] == "arithmetic"
assert row[6] == "geometric"
assert row[7] == "harmonic"
assert row[8] == "sum"
assert row[9] == "information_content"
assert row[0:10] == self.EXPECTED_COLUMNS
else:
token_id = int(row[1])
if token_id in self.bayc_token_ids_to_ranks:
assert row[0] == "boredapeyachtclub"
expected_ranks = self.bayc_token_ids_to_ranks[token_id]
assert row[2] == expected_ranks[RankProvider.TRAITS_SNIPER]
assert row[3] == expected_ranks[RankProvider.RARITY_SNIFFER]
assert row[4] == expected_ranks[RankProvider.RARITY_SNIPER]
assert row[5] == expected_ranks[RankProvider.OR_ARITHMETIC]
@@ -92,3 +101,109 @@ def test_resolve_collection_data(self):
)

assert rows == 10_001

@pytest.mark.skipif(
"not config.getoption('--run-resolvers')",
reason="This tests runs too long to have as part of CI/CD but should be "
"run whenver someone changes resolver and requires TRAIT_SNIPER_API_KEY",
)
def test_resolve_collection_data_traits_sniper(self):
# Have the resolver pull in BAYC rarity rankings from various sources
# Just do a check to ensure the ranks from different providers are
# as expected
resolve_collection_data(
resolve_remote_rarity=True,
package_path="tests",
external_rank_providers=[RankProvider.TRAITS_SNIPER],
filename="resolver/sample_files/bayc.json",
)
# Read the file and verify column values are as expected for the given tokens
output_filename = "testset_boredapeyachtclub.csv"

rows = 0
with open(output_filename) as csvfile:
resolver_output_reader = csv.reader(csvfile)
for idx, row in enumerate(resolver_output_reader):
rows += 1
if idx == 0:
assert row[0:10] == self.EXPECTED_COLUMNS
else:
token_id = int(row[1])
if token_id in self.bayc_token_ids_to_ranks:
assert row[0] == "boredapeyachtclub"
expected_ranks = self.bayc_token_ids_to_ranks[token_id]
assert row[2] == expected_ranks[RankProvider.TRAITS_SNIPER]
assert row[5] == expected_ranks[RankProvider.OR_ARITHMETIC]
assert (
row[9]
== expected_ranks[RankProvider.OR_INFORMATION_CONTENT]
)

assert rows == 10_001

def test_resolve_collection_data_rarity_sniffer(self):
# Have the resolver pull in BAYC rarity rankings from various sources
# Just do a check to ensure the ranks from different providers are
# as expected
resolve_collection_data(
resolve_remote_rarity=True,
package_path="tests",
external_rank_providers=[RankProvider.RARITY_SNIFFER],
filename="resolver/sample_files/bayc.json",
)
# Read the file and verify column values are as expected for the given tokens
output_filename = "testset_boredapeyachtclub.csv"

rows = 0
with open(output_filename) as csvfile:
resolver_output_reader = csv.reader(csvfile)
for idx, row in enumerate(resolver_output_reader):
rows += 1
if idx == 0:
assert row[0:10] == self.EXPECTED_COLUMNS
else:
token_id = int(row[1])
if token_id in self.bayc_token_ids_to_ranks:
assert row[0] == "boredapeyachtclub"
expected_ranks = self.bayc_token_ids_to_ranks[token_id]
assert row[3] == expected_ranks[RankProvider.RARITY_SNIFFER]
assert row[5] == expected_ranks[RankProvider.OR_ARITHMETIC]
assert (
row[9]
== expected_ranks[RankProvider.OR_INFORMATION_CONTENT]
)

assert rows == 10_001

def test_resolve_collection_data_no_external(self):
# Have the resolver pull in BAYC rarity rankings from various sources
# Just do a check to ensure the ranks from different providers are
# as expected
resolve_collection_data(
resolve_remote_rarity=True,
package_path="tests",
external_rank_providers=[],
filename="resolver/sample_files/bayc.json",
)
# Read the file and verify column values are as expected for the given tokens
output_filename = "testset_boredapeyachtclub.csv"

rows = 0
with open(output_filename) as csvfile:
resolver_output_reader = csv.reader(csvfile)
for idx, row in enumerate(resolver_output_reader):
rows += 1
if idx == 0:
assert row[0:10] == self.EXPECTED_COLUMNS
else:
token_id = int(row[1])
if token_id in self.bayc_token_ids_to_ranks:
assert row[0] == "boredapeyachtclub"
expected_ranks = self.bayc_token_ids_to_ranks[token_id]
assert row[5] == expected_ranks[RankProvider.OR_ARITHMETIC]
assert (
row[9]
== expected_ranks[RankProvider.OR_INFORMATION_CONTENT]
)

assert rows == 10_001
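The two skipped tests gate on a custom --run-resolvers flag through a string skipif expression, which pytest evaluates against the active config. That flag has to be registered somewhere; a minimal conftest.py sketch of the kind of hook this relies on (the repository's actual conftest may differ):

# conftest.py (sketch; not part of this diff)
def pytest_addoption(parser):
    parser.addoption(
        "--run-resolvers",
        action="store_true",
        default=False,
        help="Run the slow resolver tests that hit external rank providers.",
    )

With the option registered, pytest --run-resolvers tests/resolver/test_testset_resolver.py opts in to the otherwise-skipped tests; the TraitSniper one additionally needs TRAIT_SNIPER_API_KEY set, per its skip reason.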
