Skip to content

Commit

Permalink
🤖 Lint code
Browse files Browse the repository at this point in the history
  • Loading branch information
ivy-branch committed Jan 21, 2024
1 parent 882e6aa commit 5d77c2a
Showing 1 changed file with 65 additions and 57 deletions.
122 changes: 65 additions & 57 deletions ivy/utils/profiler.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,18 +55,17 @@ def __exit__(self, *exc):
stats.print_stats()


def tensorflow_profile_start(
    logdir: str,
    host_tracer_level: int = 2,
    python_tracer_level: int = 0,
    device_tracer_level: int = 1,
    delay_ms: int = None,
):
    """Initialize and start the TensorFlow profiler.

    Parameters
    ----------
    logdir: str
        Directory where the profile data will be saved to.
    host_tracer_level: int
        Adjust CPU tracing level. Values are: 1 - critical info only,
        2 - info, 3 - verbose. [default value is 2]
    python_tracer_level: int
        Toggle tracing of Python function calls. Values are: 1 - enabled,
        0 - disabled. [default value is 0]
    device_tracer_level: int
        Adjust device (TPU/GPU) tracing level. Values are: 1 - enabled,
        0 - disabled. [default value is 1]
    delay_ms: int
        Requests for all hosts to start profiling at a timestamp that is
        ``delay_ms`` away from the current time. ``delay_ms`` is in
        milliseconds. If zero, each host will start profiling immediately
        upon receiving the request. Default value is None, allowing the
        profiler to guess the best value.

    Returns
    -------
    None
    """
    # Imported lazily so TensorFlow is only required when profiling is used.
    from tensorflow.profiler.experimental import ProfilerOptions, start

    options = ProfilerOptions(
        host_tracer_level=host_tracer_level,
        python_tracer_level=python_tracer_level,
        device_tracer_level=device_tracer_level,
        delay_ms=delay_ms,
    )
    start(logdir, options=options)


def tensorflow_profile_stop():
    """Stop the currently-running TensorFlow profiler.

    Returns
    -------
    None
    """
    # Imported lazily so TensorFlow is only required when profiling is used.
    from tensorflow.profiler.experimental import stop

    stop()


def torch_profiler_init(
    logdir=None,
    activities=None,
    schedule=None,
    on_trace_ready=None,
    record_shapes=False,
    profile_memory=False,
    with_stack=False,
    with_flops=False,
    with_modules=False,
    experimental_config=None,
):
    """Initialize and return a Torch profiler instance.

    Parameters
    ----------
    logdir : str
        Directory where the profile data will be saved to. Only used when
        ``on_trace_ready`` is None, to build a TensorBoard trace handler.
    activities : iterable
        list of activity groups (CPU, CUDA) to use in profiling, supported values:
        ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``.
        Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA.
    schedule : Callable
        callable that takes step (int) as a single parameter and returns
        ``ProfilerAction`` value that specifies the profiler action to perform
        at each step.
    on_trace_ready : Callable
        callable that is called at each step when ``schedule`` returns
        ``ProfilerAction.RECORD_AND_SAVE`` during the profiling.
    record_shapes : bool
        save information about operator's input shapes.
    profile_memory : bool
        track tensor memory allocation/deallocation.
    with_stack : bool
        record source information (file and line number) for the ops.
    with_flops : bool
        use formula to estimate the FLOPs (floating point operations) of
        specific operators (matrix multiplication and 2D convolution).
    with_modules : bool
        record module hierarchy (including function names) corresponding to
        the callstack of the op. e.g. If module A's forward call's module B's
        forward which contains an aten::add op, then aten::add's module
        hierarchy is A.B. Note that this support exists, at the moment, only
        for TorchScript models and not eager mode models.
    experimental_config : _ExperimentalConfig
        A set of experimental options used for Kineto library features.
        Note, backward compatibility is not guaranteed.

    Returns
    -------
    Torch profiler instance.
    """
    # Imported lazily so torch is only required when profiling is used.
    from torch.profiler import profile, tensorboard_trace_handler

    profiler = profile(
        activities=activities,
        schedule=schedule,
        # Default to writing TensorBoard traces into ``logdir`` when the
        # caller supplies a log directory but no explicit trace handler.
        on_trace_ready=(
            tensorboard_trace_handler(logdir)
            if on_trace_ready is None and logdir is not None
            else on_trace_ready
        ),
        record_shapes=record_shapes,
        profile_memory=profile_memory,
        with_stack=with_stack,
        with_flops=with_flops,
        with_modules=with_modules,
        experimental_config=experimental_config,
    )
    return profiler


def torch_profiler_start(profiler):
    """Start the given Torch profiler.

    Parameters
    ----------
    profiler : torch.profiler.profile
        Torch profiler instance.

    Returns
    -------
    None
    """
    profiler.start()


def torch_profiler_stop(profiler):
    """Stop the given Torch profiler.

    Parameters
    ----------
    profiler : torch.profiler.profile
        Torch profiler instance.

    Returns
    -------
    None
    """
    # Imported lazily so torch is only required when profiling is used.
    from torch.autograd.profiler import KinetoStepTracker
    from torch.profiler.profiler import PROFILER_STEP_NAME

    profiler.stop()
    # Reset the global Kineto step counter so a subsequent profiling
    # session starts counting steps from zero.
    KinetoStepTracker.erase_step_count(PROFILER_STEP_NAME)

0 comments on commit 5d77c2a

Please sign in to comment.