Commit 909dfad

my changes

Andful committed May 31, 2024
1 parent 66c3b26 commit 909dfad

Showing 2 changed files with 27 additions and 10 deletions.
3 changes: 2 additions & 1 deletion stream/classes/opt/allocation/genetic_algorithm/fitness_evaluator.py
@@ -16,13 +16,14 @@ def __init__(
         workload: Workload | None = None,
         accelerator: Accelerator | None = None,
         node_hw_performances: dict[ComputationNode, dict[Core, CostModelEvaluation]] | None = None,
+        original_workload: Workload | None = None
     ) -> None:
         self.workload = workload
         self.accelerator = accelerator
         self.node_hw_performances = node_hw_performances
         # self.num_cores = len(inputs.accelerator.cores)
 
-    def get_fitness(self):
+    def get_fitness(self, core_allocations: list, return_scme=False):
         raise NotImplementedError
 
 
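Taken together, the two changes above widen the FitnessEvaluator contract: the constructor gains an original_workload parameter, and get_fitness now receives the candidate core_allocations plus a return_scme flag instead of taking no arguments. Below is a minimal sketch of a conforming subclass; the class name, the attribute handling, and the dummy objective values are invented for illustration, and the constructor's argument order mirrors the call InterCoreMappingStage makes in the second file of this commit.

from stream.classes.opt.allocation.genetic_algorithm.fitness_evaluator import (
    FitnessEvaluator,
)


class LatencyOnlyFitnessEvaluator(FitnessEvaluator):
    """Illustrative subclass; name and scoring logic are made up."""

    def __init__(
        self,
        workload,
        accelerator,
        node_hw_performances,
        layer_groups_flexible,
        scheduling_order,
        operands_to_prefetch,
        original_workload,
    ):
        # The base __init__ (first hunk above) takes original_workload as its
        # new fourth parameter; pass everything by keyword to be safe.
        super().__init__(
            workload=workload,
            accelerator=accelerator,
            node_hw_performances=node_hw_performances,
            original_workload=original_workload,
        )
        self.layer_groups_flexible = layer_groups_flexible
        self.scheduling_order = scheduling_order
        self.operands_to_prefetch = operands_to_prefetch

    def get_fitness(self, core_allocations: list, return_scme=False):
        # Dummy objectives; a real evaluator would schedule the workload under
        # `core_allocations` and score the result. The (energy, latency) return
        # shape is an assumption, not something this diff specifies.
        energy, latency = 0.0, float(len(core_allocations))
        if return_scme:
            return energy, latency, None  # no SCME is produced in this sketch
        return energy, latency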
34 changes: 25 additions & 9 deletions stream/classes/stages/InterCoreMappingStage.py
@@ -14,13 +14,15 @@
     GeneticAlgorithm,
 )
 from stream.classes.opt.allocation.genetic_algorithm.fitness_evaluator import (
-    StandardFitnessEvaluator,
+    StandardFitnessEvaluator, FitnessEvaluator
 )
 from stream.utils import get_too_large_operands
 from zigzag.workload.Workload import Workload
+from typing import Type, TypeVar
 
 logger = logging.getLogger(__name__)
 
+TFitnessEvaluator = TypeVar("TFitnessEvaluator", bound=FitnessEvaluator)
 
 class InterCoreMappingStage(Stage):
     """
@@ -44,6 +46,7 @@ def __init__(
         plot_full_schedule: bool = False,
         plot_data_transfer: bool = False,
         operands_to_prefetch: list[LayerOperand],
+        custom_fitness_evaluator: Type[TFitnessEvaluator] | None = None,
         **kwargs,
     ):
         """Initialize the InterCoreMappingStage.
@@ -68,6 +71,8 @@ def __init__(
         self.plot_data_transfer = plot_data_transfer
         self.operands_to_prefetch = operands_to_prefetch
         self.scheduling_order = kwargs.get("scheduling_order", None)
+        self.original_workload = kwargs["original_workload"]
+        self.custom_fitness_evaluator = custom_fitness_evaluator
 
         # Determine the set of all (layer, group) combinations to be allocated separately
         self.layer_groups: list[tuple[int, int]] = sorted(set((n.id, n.group) for n in self.workload.nodes()))
@@ -102,14 +107,25 @@ def __init__(
         self.set_hw_performance_non_flexible_nodes()
 
         # Initialize the fitness evaluator of different core allocations
-        self.fitness_evaluator = StandardFitnessEvaluator(
-            self.workload,
-            self.accelerator,
-            self.node_hw_performances,
-            self.layer_groups_flexible,
-            self.operands_to_prefetch,
-            self.scheduling_order,
-        )
+        if self.custom_fitness_evaluator is not None:
+            self.fitness_evaluator = self.custom_fitness_evaluator(
+                self.workload,
+                self.accelerator,
+                self.node_hw_performances,
+                self.layer_groups_flexible,
+                self.scheduling_order,
+                self.operands_to_prefetch,
+                self.original_workload,
+            )
+        else:
+            self.fitness_evaluator = StandardFitnessEvaluator(
+                self.workload,
+                self.accelerator,
+                self.node_hw_performances,
+                self.layer_groups_flexible,
+                self.operands_to_prefetch,
+                self.scheduling_order,
+            )
 
         # Extract the length of an individual.
         # This is the number of unique original nodes that have more than one possible core allocation
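With the plumbing above, a caller opts in by passing the evaluator class itself (not an instance) together with the original_workload kwarg that the stage now reads unconditionally. A hedged wiring sketch, assuming the LatencyOnlyFitnessEvaluator from the earlier example and a pipeline that forwards stage kwargs:

# Hypothetical wiring; `original_workload` and the surrounding stage pipeline
# are assumed to exist in the caller's setup code.
stage_kwargs = {
    # New in this commit: a FitnessEvaluator subclass, passed as a class.
    "custom_fitness_evaluator": LatencyOnlyFitnessEvaluator,
    # Also new: read via kwargs["original_workload"], so it is effectively a
    # required kwarg for InterCoreMappingStage after this change.
    "original_workload": original_workload,
}

Note that the two branches pass the shared arguments in different orders: the custom branch uses (scheduling_order, operands_to_prefetch) while the StandardFitnessEvaluator branch keeps (operands_to_prefetch, scheduling_order), so a custom evaluator's constructor must follow the custom branch's order. With custom_fitness_evaluator left as None, the stage behaves exactly as before.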
