Code and docstring pairs extracted from the plasma-umass/scalene repository (https://github.com/plasma-umass/scalene), covering functions from scalene/scalene_profiler.py, scalene/scalene_funcutils.py, and scalene/scalene_statistics.py. All code is Apache-2.0 licensed.
def set_thread_sleeping(tid: int) -> None:
"""Indicate the given thread is sleeping.
Used to attribute CPU time.
"""
Scalene.__is_thread_sleeping[tid] = True
def reset_thread_sleeping(tid: int) -> None:
"""Indicate the given thread is not sleeping.
Used to attribute CPU time."""
Scalene.__is_thread_sleeping[tid] = False
def windows_timer_loop() -> None:
"""For Windows, send periodic timer signals; launch as a background thread."""
assert sys.platform == "win32"
Scalene.timer_signals = True
while Scalene.timer_signals:
Scalene.__windows_queue.get()
time.sleep(Scalene.__args.cpu_sampling_rate)
Scalene.__orig_raise_signal(Scalene.__signals.cpu_signal)
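A minimal, platform-neutral sketch of the pattern above, with a plain callback standing in for raising the CPU signal (INTERVAL and on_tick are illustrative names): each tick must be re-armed through the queue, which is how the real loop avoids outrunning the signal handler.

import queue
import threading
import time

INTERVAL = 0.01  # stand-in for the CPU sampling rate
wakeups: "queue.Queue[None]" = queue.Queue()

def timer_loop(on_tick) -> None:
    while True:
        wakeups.get()        # wait until the previous tick has been handled
        time.sleep(INTERVAL)
        on_tick()            # the real loop raises the CPU signal here

ticks = []
threading.Thread(target=timer_loop,
                 args=(lambda: ticks.append(time.monotonic()),),
                 daemon=True).start()
wakeups.put(None)            # arm one tick
time.sleep(0.05)
print(len(ticks))            # 1, since no further put() re-armed the loop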
def start_signal_queues() -> None:
"""Start the signal processing queues (i.e., their threads)."""
for sigq in Scalene.__sigqueues:
sigq.start()
def stop_signal_queues() -> None:
"""Stop the signal processing queues (i.e., their threads)."""
for sigq in Scalene.__sigqueues:
sigq.stop()
def term_signal_handler(
signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
this_frame: Optional[FrameType],
) -> None:
"""Handle terminate signals."""
Scalene.stop()
Scalene.output_profile()
Scalene.__orig_exit(Scalene.__sigterm_exit_code)
def malloc_signal_handler(
signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
this_frame: Optional[FrameType],
) -> None:
"""Handle allocation signals."""
if not Scalene.__args.memory:
# This should never happen, but we fail gracefully.
return
from scalene import pywhere # type: ignore
if this_frame:
Scalene.enter_function_meta(this_frame, Scalene.__stats)
# Walk the stack till we find a line of code in a file we are tracing.
found_frame = False
f = this_frame
while f:
if found_frame := Scalene.should_trace(
f.f_code.co_filename, f.f_code.co_name
):
break
f = cast(FrameType, f.f_back)
if not found_frame:
return
assert f
# Start tracing until we execute a different line of
# code in a file we are tracking.
# First, see if we have now executed a different line of code.
# If so, increment.
invalidated = pywhere.get_last_profiled_invalidated()
(fname, lineno, lasti) = Scalene.last_profiled_tuple()
if (
not invalidated
and this_frame
and not (on_stack(this_frame, fname, lineno))
):
Scalene.update_profiled()
pywhere.set_last_profiled_invalidated_false()
# In the setprofile callback, we rely on
# __last_profiled always having the same memory address.
# This is an optimization to not have to traverse the Scalene profiler
# object's dictionary every time we want to update the last profiled line.
#
# A previous change to this code set Scalene.__last_profiled = [fname, lineno, lasti],
# which created a new list object and set the __last_profiled attribute to the new list. This
# made the object held in `pywhere.cpp` out of date, and caused the profiler to not update the last profiled line.
Scalene.__last_profiled[:] = [
Filename(f.f_code.co_filename),
LineNumber(f.f_lineno),
ByteCodeIndex(f.f_lasti),
]
Scalene.__alloc_sigq.put([0])
pywhere.enable_settrace(this_frame)
del this_frame
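The comment above about __last_profiled hinges on object identity: pywhere.cpp keeps a reference to the list object itself, so the handler must mutate it with a slice assignment rather than rebind the name. A small sketch of the difference (values are illustrative):

last_profiled = ["a.py", 1, 0]
held_by_pywhere = last_profiled          # stands in for the reference held in C++

last_profiled[:] = ["b.py", 42, 6]       # in-place update: both names observe it
assert held_by_pywhere == ["b.py", 42, 6]

last_profiled = ["c.py", 7, 2]           # rebinding: the held reference goes stale
assert held_by_pywhere == ["b.py", 42, 6]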
def free_signal_handler(
signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
this_frame: Optional[FrameType],
) -> None:
"""Handle free signals."""
if this_frame:
Scalene.enter_function_meta(this_frame, Scalene.__stats)
Scalene.__alloc_sigq.put([0])
del this_frame
def memcpy_signal_handler(
signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
this_frame: Optional[FrameType],
) -> None:
"""Handle memcpy signals."""
Scalene.__memcpy_sigq.put((signum, this_frame))
del this_frame
def enable_signals() -> None:
"""Set up the signal handlers to handle interrupts for profiling and start the
timer interrupts."""
if sys.platform == "win32":
Scalene.enable_signals_win32()
return
Scalene.start_signal_queues()
# Set signal handlers for various events.
for sig, handler in [
(Scalene.__signals.malloc_signal, Scalene.malloc_signal_handler),
(Scalene.__signals.free_signal, Scalene.free_signal_handler),
(Scalene.__signals.memcpy_signal, Scalene.memcpy_signal_handler),
(signal.SIGTERM, Scalene.term_signal_handler),
(Scalene.__signals.cpu_signal, Scalene.cpu_signal_handler),
]:
Scalene.__orig_signal(sig, handler)
# Set every signal to restart interrupted system calls.
for s in Scalene.__signals.get_all_signals():
Scalene.__orig_siginterrupt(s, False)
Scalene.__orig_setitimer(
Scalene.__signals.cpu_timer_signal,
Scalene.__args.cpu_sampling_rate,
)
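A hedged sketch of the POSIX timer machinery this relies on: install a handler, mark the signal so interrupted system calls are restarted, and arm an interval timer. The signal and interval below are illustrative; Scalene uses its own cpu_timer_signal and cpu_sampling_rate and re-arms the timer from the handler.

import signal
import sys
import time

def on_tick(signum, frame):
    print("sample at", time.perf_counter())

if sys.platform != "win32":
    signal.signal(signal.SIGALRM, on_tick)
    signal.siginterrupt(signal.SIGALRM, False)        # restart interrupted syscalls
    signal.setitimer(signal.ITIMER_REAL, 0.01, 0.01)  # first delay, then repeat interval
    time.sleep(0.05)                                  # a few ticks fire here
    signal.setitimer(signal.ITIMER_REAL, 0)           # disarm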
def cpu_signal_handler(
signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
this_frame: Optional[FrameType],
) -> None:
"""Handle CPU signals."""
try:
# Get current time stats.
now_sys, now_user = get_times()
now_virtual = time.process_time()
now_wallclock = time.perf_counter()
if (
Scalene.__last_signal_time.virtual == 0
or Scalene.__last_signal_time.wallclock == 0
):
# Initialization: store values and update on the next pass.
Scalene.__last_signal_time.virtual = now_virtual
Scalene.__last_signal_time.wallclock = now_wallclock
Scalene.__last_signal_time.sys = now_sys
Scalene.__last_signal_time.user = now_user
if sys.platform != "win32":
Scalene.__orig_setitimer(
Scalene.__signals.cpu_timer_signal,
Scalene.__args.cpu_sampling_rate,
)
return
if Scalene.__accelerator:
(gpu_load, gpu_mem_used) = Scalene.__accelerator.get_stats()
else:
(gpu_load, gpu_mem_used) = (0.0, 0.0)
# Process this CPU sample.
Scalene.process_cpu_sample(
signum,
Scalene.compute_frames_to_record(),
now_virtual,
now_wallclock,
now_sys,
now_user,
gpu_load,
gpu_mem_used,
Scalene.__last_signal_time.virtual,
Scalene.__last_signal_time.wallclock,
Scalene.__last_signal_time.sys,
Scalene.__last_signal_time.user,
Scalene.__is_thread_sleeping,
)
elapsed = now_wallclock - Scalene.__last_signal_time.wallclock
# Store the latest values as the previously recorded values.
Scalene.__last_signal_time.virtual = now_virtual
Scalene.__last_signal_time.wallclock = now_wallclock
Scalene.__last_signal_time.sys = now_sys
Scalene.__last_signal_time.user = now_user
# Restart the timer while handling any timers set by the client.
if sys.platform != "win32":
if Scalene.client_timer.is_set:
(
should_raise,
remaining_time,
) = Scalene.client_timer.yield_next_delay(elapsed)
if should_raise:
Scalene.__orig_raise_signal(signal.SIGUSR1)
# NOTE-- 0 will only be returned if the 'seconds' have elapsed
# and there is no interval
to_wait: float
if remaining_time > 0:
to_wait = min(
remaining_time, Scalene.__args.cpu_sampling_rate
)
else:
to_wait = Scalene.__args.cpu_sampling_rate
Scalene.client_timer.reset()
Scalene.__orig_setitimer(
Scalene.__signals.cpu_timer_signal,
to_wait,
)
else:
Scalene.__orig_setitimer(
Scalene.__signals.cpu_timer_signal,
Scalene.__args.cpu_sampling_rate,
)
finally:
if sys.platform == "win32":
Scalene.__windows_queue.put(None)
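The re-arm logic above shortens the next delay when user code has its own timer pending, so neither the client timer nor the profiler starves the other. A sketch of that decision with illustrative values:

cpu_sampling_rate = 0.01

def next_delay(remaining_time: float) -> float:
    # remaining_time <= 0 means the client's delay already elapsed with no interval left.
    return min(remaining_time, cpu_sampling_rate) if remaining_time > 0 else cpu_sampling_rate

assert next_delay(0.004) == 0.004   # the client timer fires first
assert next_delay(0.5) == 0.01      # the next profiling sample comes first
assert next_delay(0.0) == 0.01      # fall back to the sampling rate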
def output_profile(program_args: Optional[List[str]] = None) -> bool:
"""Output the profile. Returns true iff there was any info reported the profile."""
# sourcery skip: inline-immediately-returned-variable
# print(flamegraph_format(Scalene.__stats.stacks))
if Scalene.__args.json:
json_output = Scalene.__json.output_profiles(
Scalene.__program_being_profiled,
Scalene.__stats,
Scalene.__pid,
Scalene.profile_this_code,
Scalene.__python_alias_dir,
Scalene.__program_path,
Scalene.__entrypoint_dir,
program_args,
profile_memory=Scalene.__args.memory,
reduced_profile=Scalene.__args.reduced_profile,
)
# Since the default value returned for "there are no samples"
# is `{}`, we use a sentinel value `{"is_child": True}`
# when inside a child process to indicate that there are samples, but they weren't
# turned into a JSON file because they'll later
# be used by the parent process
if "is_child" in json_output:
return True
outfile = Scalene.__output.output_file
if Scalene.__args.outfile:
outfile = os.path.join(
os.path.dirname(Scalene.__args.outfile),
os.path.splitext(os.path.basename(Scalene.__args.outfile))[0] + ".json"
)
# outfile = Scalene.__args.outfile
# If there was no output file specified, print to the console.
if not outfile:
if sys.platform == "win32":
outfile = "CON"
else:
outfile = "/dev/stdout"
# Write the JSON to the output file (or console).
with open(outfile, "w") as f:
f.write(
json.dumps(json_output, sort_keys=True, indent=4) + "\n"
)
return json_output != {}
else:
output = Scalene.__output
column_width = Scalene.__args.column_width
if not Scalene.__args.html:
# Get column width of the terminal and adjust to fit.
with contextlib.suppress(Exception):
# If we are in a Jupyter notebook, stick with 132
if "ipykernel" in sys.modules:
column_width = 132
else:
import shutil
column_width = shutil.get_terminal_size().columns
did_output: bool = output.output_profiles(
column_width,
Scalene.__stats,
Scalene.__pid,
Scalene.profile_this_code,
Scalene.__python_alias_dir,
Scalene.__program_path,
program_args,
profile_memory=Scalene.__args.memory,
reduced_profile=Scalene.__args.reduced_profile,
)
return did_output
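A small sketch of the outfile derivation above: whatever extension was passed to --outfile, the JSON profile is written alongside it with a .json suffix (the path is illustrative).

import os

outfile = "/tmp/run/profile.html"    # hypothetical --outfile value
json_path = os.path.join(
    os.path.dirname(outfile),
    os.path.splitext(os.path.basename(outfile))[0] + ".json",
)
print(json_path)                     # /tmp/run/profile.json on POSIX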
def profile_this_code(fname: Filename, lineno: LineNumber) -> bool:
# sourcery skip: inline-immediately-returned-variable
"""When using @profile, only profile files & lines that have been decorated."""
if not Scalene.__files_to_profile:
return True
if fname not in Scalene.__files_to_profile:
return False
# Now check to see if it's the right line range.
line_info = Scalene.get_line_info(fname)
found_function = any(
line_start <= lineno < line_start + len(lines)
for (lines, line_start) in line_info
)
return found_function
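A sketch of the check above, assuming get_line_info yields (source_lines, first_line_number) pairs as the loop suggests: a line is profiled only if it falls inside the span of some @profile-decorated function.

line_info = [(["def f():\n", "    return 1\n"], 10)]   # a decorated function spanning lines 10-11

def in_decorated_function(lineno: int) -> bool:
    return any(start <= lineno < start + len(lines) for lines, start in line_info)

assert in_decorated_function(11)
assert not in_decorated_function(12)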
def process_cpu_sample(
_signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
new_frames: List[Tuple[FrameType, int, FrameType]],
now_virtual: float,
now_wallclock: float,
now_sys: float,
now_user: float,
gpu_load: float,
gpu_mem_used: float,
prev_virtual: float,
prev_wallclock: float,
_prev_sys: float,
prev_user: float,
is_thread_sleeping: Dict[int, bool],
) -> None:
"""Handle interrupts for CPU profiling."""
# We have recorded how long it has been since we received a timer
# before. See the logic below.
# If it's time to print some profiling info, do so.
if now_wallclock >= Scalene.__next_output_time:
# Print out the profile. Set the next output time, stop
# signals, print the profile, and then start signals
# again.
Scalene.__next_output_time += Scalene.__args.profile_interval
stats = Scalene.__stats
# pause (lock) all the queues to prevent updates while we output
with contextlib.ExitStack() as stack:
_ = [stack.enter_context(s.lock) for s in Scalene.__sigqueues]
stats.stop_clock()
Scalene.output_profile()
stats.start_clock()
if not new_frames:
# No new frames, so nothing to update.
return
# Here we take advantage of an ostensible limitation of Python:
# it only delivers signals after the interpreter has given up
# control. This seems to mean that sampling is limited to code
# running purely in the interpreter, and in fact, that was a limitation
# of the first version of Scalene, meaning that native code was entirely ignored.
#
# (cf. https://docs.python.org/3.9/library/signal.html#execution-of-python-signal-handlers)
#
# However: lemons -> lemonade: this "problem" is in fact
# an effective way to separate out time spent in
# Python vs. time spent in native code "for free"! If we get
# the signal immediately, we must be running in the
# interpreter. On the other hand, if it was delayed, that means
# we are running code OUTSIDE the interpreter, e.g.,
# native code (be it inside of Python or in a library). We
# account for this time by tracking the elapsed (process) time
# and compare it to the interval, and add any computed delay
# (as if it were sampled) to the C counter.
elapsed_virtual = now_virtual - prev_virtual
elapsed_wallclock = now_wallclock - prev_wallclock
# CPU utilization is the fraction of time spent on the CPU
# over the total time.
elapsed_user = now_user - prev_user
if any([elapsed_virtual < 0, elapsed_wallclock < 0, elapsed_user < 0]):
# If we get negative values, which appear to arise in some
# multi-process settings (seen in gunicorn), skip this
# sample.
return
cpu_utilization = 0.0
if elapsed_wallclock != 0:
cpu_utilization = elapsed_user / elapsed_wallclock
# On multicore systems running multi-threaded native code, CPU
# utilization can exceed 1; that is, elapsed user time is
# longer than elapsed wallclock time. If this occurs, set
# wall clock time to user time and set CPU utilization to 100%.
core_utilization = cpu_utilization / Scalene.__availableCPUs
if cpu_utilization > 1.0:
cpu_utilization = 1.0
elapsed_wallclock = elapsed_user
# Deal with an odd case reported here: https://github.com/plasma-umass/scalene/issues/124
# (Note: probably obsolete now that Scalene is using the nvidia wrappers, but just in case...)
# We don't want to report 'nan', so turn the load into 0.
if math.isnan(gpu_load):
gpu_load = 0.0
assert gpu_load >= 0.0 and gpu_load <= 1.0
gpu_time = gpu_load * elapsed_wallclock
Scalene.__stats.total_gpu_samples += gpu_time
python_time = Scalene.__args.cpu_sampling_rate
c_time = elapsed_virtual - python_time
c_time = max(c_time, 0)
# Now update counters (weighted) for every frame we are tracking.
total_time = python_time + c_time
# First, find out how many frames are not sleeping. We need
# to know this number so we can parcel out time appropriately
# (equally to each running thread).
total_frames = sum(
not is_thread_sleeping[tident]
for frame, tident, orig_frame in new_frames
)
if total_frames == 0:
total_frames = 1
normalized_time = total_time / total_frames
# Now attribute execution time.
main_thread_frame = new_frames[0][0]
average_python_time = python_time / total_frames
average_c_time = c_time / total_frames
average_cpu_time = (python_time + c_time) / total_frames
if Scalene.__args.stacks:
add_stack(
main_thread_frame,
Scalene.should_trace,
Scalene.__stats.stacks,
average_python_time,
average_c_time,
average_cpu_time,
)
# First, handle the main thread.
Scalene.enter_function_meta(main_thread_frame, Scalene.__stats)
fname = Filename(main_thread_frame.f_code.co_filename)
lineno = LineNumber(main_thread_frame.f_lineno)
# print(main_thread_frame)
# print(fname, lineno)
main_tid = cast(int, threading.main_thread().ident)
if not is_thread_sleeping[main_tid]:
Scalene.__stats.cpu_samples_python[fname][
lineno
] += average_python_time
Scalene.__stats.cpu_samples_c[fname][lineno] += average_c_time
Scalene.__stats.cpu_samples[fname] += average_cpu_time
Scalene.__stats.cpu_utilization[fname][lineno].push(
cpu_utilization
)
Scalene.__stats.core_utilization[fname][lineno].push(
core_utilization
)
Scalene.__stats.gpu_samples[fname][lineno] += (
gpu_load * elapsed_wallclock
)
Scalene.__stats.n_gpu_samples[fname][lineno] += elapsed_wallclock
Scalene.__stats.gpu_mem_samples[fname][lineno].push(gpu_mem_used)
# Now handle the rest of the threads.
for frame, tident, orig_frame in new_frames:
if frame == main_thread_frame:
continue
add_stack(
frame,
Scalene.should_trace,
Scalene.__stats.stacks,
average_python_time,
average_c_time,
average_cpu_time,
)
# In a thread.
fname = Filename(frame.f_code.co_filename)
lineno = LineNumber(frame.f_lineno)
Scalene.enter_function_meta(frame, Scalene.__stats)
# We can't play the same game here of attributing
# time, because we are in a thread, and threads don't
# get signals in Python. Instead, we check if the
# bytecode instruction being executed is a function
# call. If so, we attribute all the time to native.
# NOTE: for now, we don't try to attribute GPU time to threads.
if is_thread_sleeping[tident]:
# Ignore sleeping threads.
continue
# Check if the original caller is stuck inside a call.
if ScaleneFuncUtils.is_call_function(
orig_frame.f_code,
ByteCodeIndex(orig_frame.f_lasti),
):
# It is. Attribute time to native.
Scalene.__stats.cpu_samples_c[fname][lineno] += normalized_time
else:
# Not in a call function so we attribute the time to Python.
Scalene.__stats.cpu_samples_python[fname][
lineno
] += normalized_time
Scalene.__stats.cpu_samples[fname] += normalized_time
Scalene.__stats.cpu_utilization[fname][lineno].push(
cpu_utilization
)
Scalene.__stats.core_utilization[fname][lineno].push(
core_utilization
)
# Clean up all the frames
del new_frames[:]
del new_frames
del is_thread_sleeping
Scalene.__stats.total_cpu_samples += total_time
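A worked sketch of the attribution arithmetic described in the long comment above, with illustrative numbers: one sampling interval is charged to Python; any extra virtual (process) time, during which the signal could not be delivered, is charged to native code; and utilization is user time over wallclock time.

cpu_sampling_rate = 0.01
elapsed_virtual   = 0.025   # process time since the previous signal
elapsed_user      = 0.024
elapsed_wallclock = 0.030

python_time = cpu_sampling_rate
c_time = max(elapsed_virtual - python_time, 0.0)       # 0.015 spent outside the interpreter
cpu_utilization = elapsed_user / elapsed_wallclock     # 0.8
print(python_time, c_time, round(cpu_utilization, 2))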
def compute_frames_to_record() -> List[Tuple[FrameType, int, FrameType]]:
"""Collect all stack frames that Scalene actually processes."""
frames: List[Tuple[FrameType, int]] = [
(
cast(
FrameType,
sys._current_frames().get(cast(int, t.ident), None),
),
cast(int, t.ident),
)
for t in threading.enumerate()
if t != threading.main_thread()
]
# Put the main thread in the front.
tid = cast(int, threading.main_thread().ident)
frames.insert(
0,
(
sys._current_frames().get(tid, cast(FrameType, None)),
tid,
),
)
# Process all the frames to remove ones we aren't going to track.
new_frames: List[Tuple[FrameType, int, FrameType]] = []
for frame, tident in frames:
orig_frame = frame
if not frame:
continue
fname = frame.f_code.co_filename
func = frame.f_code.co_name
# Record samples only for files we care about.
if not fname:
# 'eval/compile' gives no f_code.co_filename. We have
# to look back into the outer frame in order to check
# the co_filename.
back = cast(FrameType, frame.f_back)
fname = Filename(back.f_code.co_filename)
func = back.f_code.co_name
while not Scalene.should_trace(fname, func):
# Walk the stack backwards until we hit a frame that
# IS one we should trace (if there is one). i.e., if
# it's in the code being profiled, and it is just
# calling stuff deep in libraries.
if frame:
frame = cast(FrameType, frame.f_back)
else:
break
if frame:
fname = frame.f_code.co_filename
func = frame.f_code.co_name
if frame:
new_frames.append((frame, tident, orig_frame))
del frames[:]
return new_frames
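A sketch of the frame-harvesting step above: sys._current_frames() maps thread identifiers to their topmost frames, which can then be walked via f_back until a frame worth tracing is found.

import sys
import threading

frames = sys._current_frames()
main_frame = frames[threading.main_thread().ident]
print(main_frame.f_code.co_filename, main_frame.f_lineno)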
def enter_function_meta(
frame: FrameType, stats: ScaleneStatistics
) -> None:
"""Update tracking info so we can correctly report line number info later."""
fname = Filename(frame.f_code.co_filename)
lineno = LineNumber(frame.f_lineno)
f = frame
try:
while "<" in Filename(f.f_code.co_name):
f = cast(FrameType, f.f_back)
# Handle case where the function with the name wrapped
# in triangle brackets is at the bottom of the stack
if f is None:
return
except Exception:
return
if not Scalene.should_trace(f.f_code.co_filename, f.f_code.co_name):
return
fn_name = get_fully_qualified_name(f)
firstline = f.f_code.co_firstlineno
stats.function_map[fname][lineno] = fn_name
stats.firstline_map[fn_name] = LineNumber(firstline)
def alloc_sigqueue_processor(x: Optional[List[int]]) -> None:
"""Handle interrupts for memory profiling (mallocs and frees)."""
stats = Scalene.__stats
curr_pid = os.getpid()
# Process the input array from where we left off reading last time.
arr: List[
Tuple[
int,
str,
float,
float,
str,
Filename,
LineNumber,
ByteCodeIndex,
]
] = []
with contextlib.suppress(FileNotFoundError):
while Scalene.__malloc_mapfile.read():
count_str = Scalene.__malloc_mapfile.get_str()
if count_str.strip() == "":
break
(
action,
alloc_time_str,
count_str,
python_fraction_str,
pid,
pointer,
reported_fname,
reported_lineno,
bytei_str,
) = count_str.split(",")
if int(curr_pid) != int(pid):
continue
arr.append(
(
int(alloc_time_str),
action,
float(count_str),
float(python_fraction_str),
pointer,
Filename(reported_fname),
LineNumber(int(reported_lineno)),
ByteCodeIndex(int(bytei_str)),
)
)
stats.alloc_samples += len(arr)
# Iterate through the array to compute the new current footprint
# and update the global __memory_footprint_samples. Since on some systems,
# we get free events before mallocs, force `before` to always be at least 0.
before = max(stats.current_footprint, 0)
prevmax = stats.max_footprint
freed_last_trigger = 0
for item in arr:
(
_alloc_time,
action,
count,
python_fraction,
pointer,
fname,
lineno,
bytei,
) = item
is_malloc = action == Scalene.MALLOC_ACTION
if count == scalene.scalene_config.NEWLINE_TRIGGER_LENGTH + 1:
continue # in previous implementations, we were adding NEWLINE to the footprint.
# We should not account for this in the user-facing profile.
count /= Scalene.BYTES_PER_MB
if is_malloc:
stats.current_footprint += count
if stats.current_footprint > stats.max_footprint:
stats.max_footprint = stats.current_footprint
stats.max_footprint_python_fraction = python_fraction
stats.max_footprint_loc = (fname, lineno)
else:
assert action in [
Scalene.FREE_ACTION,
Scalene.FREE_ACTION_SAMPLED,
]
stats.current_footprint -= count
# Force current footprint to be non-negative; this
# code is needed because Scalene can miss some initial
# allocations at startup.
stats.current_footprint = max(0, stats.current_footprint)
if (
action == Scalene.FREE_ACTION_SAMPLED
and stats.last_malloc_triggered[2] == pointer
):
freed_last_trigger += 1
timestamp = time.monotonic_ns() - Scalene.__start_time
stats.memory_footprint_samples.append(
[
timestamp,
stats.current_footprint,
]
)
after = stats.current_footprint
if freed_last_trigger:
if freed_last_trigger <= 1:
# We freed the last allocation trigger. Adjust scores.
this_fn, this_ln, _this_ptr = stats.last_malloc_triggered
if this_ln != 0:
mallocs, frees = stats.leak_score[this_fn][this_ln]
stats.leak_score[this_fn][this_ln] = (
mallocs,
frees + 1,
)
stats.last_malloc_triggered = (
Filename(""),
LineNumber(0),
Address("0x0"),
)
allocs = 0.0
last_malloc = (Filename(""), LineNumber(0), Address("0x0"))
malloc_pointer = "0x0"
curr = before
# Go through the array again and add each updated current footprint.
for item in arr:
(
_alloc_time,
action,
count,
python_fraction,
pointer,
fname,
lineno,
bytei,
) = item
is_malloc = action == Scalene.MALLOC_ACTION
if (
is_malloc
and count == scalene.scalene_config.NEWLINE_TRIGGER_LENGTH + 1
):
with Scalene.__invalidate_mutex:
last_file, last_line = Scalene.__invalidate_queue.pop(0)
stats.memory_malloc_count[last_file][last_line] += 1
stats.memory_aggregate_footprint[last_file][
last_line
] += stats.memory_current_highwater_mark[last_file][last_line]
stats.memory_current_footprint[last_file][last_line] = 0
stats.memory_current_highwater_mark[last_file][last_line] = 0
continue
# Add the byte index to the set for this line (if it's not there already).
stats.bytei_map[fname][lineno].add(bytei)
count /= Scalene.BYTES_PER_MB
if is_malloc:
allocs += count
curr += count
assert curr <= stats.max_footprint
malloc_pointer = pointer
stats.memory_malloc_samples[fname][lineno] += count
stats.memory_python_samples[fname][lineno] += (
python_fraction * count
)
stats.malloc_samples[fname] += 1
stats.total_memory_malloc_samples += count
# Update current and max footprints for this file & line.
stats.memory_current_footprint[fname][lineno] += count
stats.memory_current_highwater_mark[fname][lineno] = max(
stats.memory_current_highwater_mark[fname][lineno],
stats.memory_current_footprint[fname][lineno],
)
assert stats.current_footprint <= stats.max_footprint
stats.memory_max_footprint[fname][lineno] = max(
stats.memory_current_footprint[fname][lineno],
stats.memory_max_footprint[fname][lineno],
)
# Ensure that the max footprint never goes above the true max footprint.
# This is a work-around for a condition that in theory should never happen, but...
stats.memory_max_footprint[fname][lineno] = min(
stats.max_footprint,
stats.memory_max_footprint[fname][lineno],
)
assert stats.current_footprint <= stats.max_footprint
assert (
stats.memory_max_footprint[fname][lineno]
<= stats.max_footprint
)
else:
assert action in [
Scalene.FREE_ACTION,
Scalene.FREE_ACTION_SAMPLED,
]
curr -= count
stats.memory_free_samples[fname][lineno] += count
stats.memory_free_count[fname][lineno] += 1
stats.total_memory_free_samples += count
stats.memory_current_footprint[fname][lineno] -= count
# Ensure that we never drop the current footprint below 0.
stats.memory_current_footprint[fname][lineno] = max(
0, stats.memory_current_footprint[fname][lineno]
)
stats.per_line_footprint_samples[fname][lineno].append(
[time.monotonic_ns() - Scalene.__start_time, max(0, curr)]
)
# If we allocated anything, then mark this as the last triggering malloc
if allocs > 0:
last_malloc = (
Filename(fname),
LineNumber(lineno),
Address(malloc_pointer),
)
stats.allocation_velocity = (
stats.allocation_velocity[0] + (after - before),
stats.allocation_velocity[1] + allocs,
)
if (
Scalene.__args.memory_leak_detector
and prevmax < stats.max_footprint
and stats.max_footprint > 100
):
stats.last_malloc_triggered = last_malloc
fname, lineno, _ = last_malloc
mallocs, frees = stats.leak_score[fname][lineno]
stats.leak_score[fname][lineno] = (mallocs + 1, frees)
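A hedged sketch of one record in the malloc map file, inferred from the split(",") above: action, allocation time, byte count, Python fraction, pid, pointer, filename, line number, and bytecode index. The field values below are made up for illustration.

record = "M,123456,1048576,0.75,4242,0x7f00c0ffee,app.py,17,8"   # hypothetical record
(action, alloc_time, count, python_fraction,
 pid, pointer, fname, lineno, bytei) = record.split(",")
mb = float(count) / (1024 * 1024)          # same MB conversion as BYTES_PER_MB
print(action, mb, float(python_fraction), fname, int(lineno), int(bytei))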
def before_fork() -> None:
"""The parent process should invoke this function just before a fork.
Invoked by replacement_fork.py.
"""
Scalene.stop_signal_queues()
def after_fork_in_parent(child_pid: int) -> None:
"""The parent process should invoke this function after a fork.
Invoked by replacement_fork.py.
"""
Scalene.add_child_pid(child_pid)
Scalene.start_signal_queues()
def after_fork_in_child() -> None:
"""
Executed by a child process after a fork; mutates the
current profiler into a child.
Invoked by replacement_fork.py.
"""
Scalene.__is_child = True
Scalene.clear_metrics()
if Scalene.__accelerator and Scalene.__accelerator.has_gpu():
Scalene.__accelerator.reinit()
# Note: __parent_pid of the topmost process is its own pid.
Scalene.__pid = Scalene.__parent_pid
if "off" not in Scalene.__args or not Scalene.__args.off:
Scalene.enable_signals()
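For illustration, the standard os.register_at_fork API covers the same three moments these hooks handle; Scalene wires its hooks through replacement_fork.py rather than this call, so this is only a sketch of the fork lifecycle (POSIX only).

import os

if hasattr(os, "register_at_fork"):
    os.register_at_fork(
        before=lambda: print("parent: pause the signal queues"),
        after_in_parent=lambda: print("parent: record child pid, restart queues"),
        after_in_child=lambda: print("child: clear metrics, re-enable signals"),
    )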
def memcpy_sigqueue_processor(
_signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
frame: FrameType,
) -> None:
"""Process memcpy signals (used in a ScaleneSigQueue)."""
curr_pid = os.getpid()
arr: List[Tuple[str, int, int, int, int]] = []
# Process the input array.
with contextlib.suppress(ValueError):
while Scalene.__memcpy_mapfile.read():
count_str = Scalene.__memcpy_mapfile.get_str()
(
memcpy_time_str,
count_str2,
pid,
filename,
lineno,
bytei,
) = count_str.split(",")
if int(curr_pid) != int(pid):
continue
arr.append(
(
filename,
int(lineno),
int(bytei),
int(memcpy_time_str),
int(count_str2),
)
)
arr.sort()
for item in arr:
filename, linenum, byteindex, _memcpy_time, count = item
fname = Filename(filename)
line_no = LineNumber(linenum)
byteidx = ByteCodeIndex(byteindex)
# Add the byte index to the set for this line.
Scalene.__stats.bytei_map[fname][line_no].add(byteidx)
Scalene.__stats.memcpy_samples[fname][line_no] += int(count)
def should_trace(filename: Filename, func: str) -> bool:
"""Return true if we should trace this filename and function."""
if not filename:
return False
if Scalene.__profiler_base in filename:
# Don't profile the profiler.
return False
if Scalene.__functions_to_profile:
if filename in Scalene.__functions_to_profile:
if func in {
fn.__code__.co_name
for fn in Scalene.__functions_to_profile[filename]
}:
return True
return False
# Don't profile the Python libraries, unless overridden by --profile-all
try:
resolved_filename = str(pathlib.Path(filename).resolve())
except OSError:
# Not a file
return False
if not Scalene.__args.profile_all:
for n in sysconfig.get_scheme_names():
for p in sysconfig.get_path_names():
the_path = sysconfig.get_path(p, n)
libdir = str(pathlib.Path(the_path).resolve())
if libdir in resolved_filename:
return False
# Generic handling follows (when no @profile decorator has been used).
# TODO [EDB]: add support for this in traceconfig.cpp
profile_exclude_list = Scalene.__args.profile_exclude.split(",")
if any(
prof in filename for prof in profile_exclude_list if prof != ""
):
return False
if filename.startswith("_ipython-input-"):
# Profiling code created in a Jupyter cell:
# create a file to hold the contents.
import IPython
if result := re.match(r"_ipython-input-([0-9]+)-.*", filename):
# Write the cell's contents into the file.
cell_contents = (
IPython.get_ipython().history_manager.input_hist_raw[
int(result[1])
]
)
with open(filename, "w+") as f:
f.write(cell_contents)
return True
# If (a) `profile-only` was used, and (b) the file matched
# NONE of the provided patterns, don't profile it.
profile_only_set = set(Scalene.__args.profile_only.split(","))
if profile_only_set and all(
prof not in filename for prof in profile_only_set
):
return False
if filename[0] == "<" and filename[-1] == ">":
# Special non-file
return False
# Now we've filtered out any non matches to profile-only patterns.
# If `profile-all` is specified, profile this file.
if Scalene.__args.profile_all:
return True
# Profile anything in the program's directory or a child directory,
# but nothing else, unless otherwise specified.
filename = Filename(
os.path.normpath(os.path.join(Scalene.__program_path, filename))
)
return Scalene.__program_path in filename
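A sketch of the standard-library filter used above: a file is skipped when its resolved path sits under any install path that sysconfig reports (unless --profile-all is given).

import pathlib
import sysconfig

def in_python_libs(filename: str) -> bool:
    resolved = str(pathlib.Path(filename).resolve())
    return any(
        str(pathlib.Path(sysconfig.get_path(p, n)).resolve()) in resolved
        for n in sysconfig.get_scheme_names()
        for p in sysconfig.get_path_names()
    )

print(in_python_libs(sysconfig.__file__))   # True: sysconfig itself lives in the standard library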
def start() -> None:
"""Initiate profiling."""
if not Scalene.__initialized:
print(
"ERROR: Do not try to invoke `start` if you have not called Scalene using one of the methods\n"
"in https://github.com/plasma-umass/scalene#using-scalene\n"
"(The most likely issue is that you need to run your code with `scalene`, not `python`).",
file=sys.stderr,
)
sys.exit(1)
Scalene.__stats.start_clock()
Scalene.enable_signals()
Scalene.__start_time = time.monotonic_ns()
Scalene.__done = False
if Scalene.__args.memory:
from scalene import pywhere # type: ignore
pywhere.set_scalene_done_false()
def stop() -> None:
"""Complete profiling."""
Scalene.__done = True
if Scalene.__args.memory:
from scalene import pywhere # type: ignore
pywhere.set_scalene_done_true()
Scalene.disable_signals()
Scalene.__stats.stop_clock()
if Scalene.__args.outfile:
Scalene.__profile_filename = os.path.join(
os.path.dirname(Scalene.__args.outfile),
os.path.basename(Scalene.__profile_filename),
)
if (
Scalene.__args.web
and not Scalene.__args.cli
and not Scalene.__is_child
):
# First, check for a browser.
try:
if not find_browser():
# Could not open a graphical web browser tab;
# act as if --web was not specified
Scalene.__args.web = False
else:
# Force JSON output to profile.json.
Scalene.__args.json = True
Scalene.__output.html = False
Scalene.__output.output_file = Scalene.__profile_filename
except Exception:
# Couldn't find a browser.
Scalene.__args.web = False
# If so, set variables appropriately.
if Scalene.__args.web and Scalene.in_jupyter():
# Force JSON output to profile.json.
Scalene.__args.json = True
Scalene.__output.html = False
Scalene.__output.output_file = Scalene.__profile_filename
def is_done() -> bool:
"""Return true if Scalene has stopped profiling."""
return Scalene.__done
def start_signal_handler(
_signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
_this_frame: Optional[FrameType],
) -> None:
"""Respond to a signal to start or resume profiling (--on).
See scalene_parseargs.py.
"""
for pid in Scalene.child_pids:
Scalene.__orig_kill(pid, Scalene.__signals.start_profiling_signal)
Scalene.start()
def stop_signal_handler(
_signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
_this_frame: Optional[FrameType],
) -> None:
"""Respond to a signal to suspend profiling (--off).
See scalene_parseargs.py.
"""
for pid in Scalene.child_pids:
Scalene.__orig_kill(pid, Scalene.__signals.stop_profiling_signal)
Scalene.stop()
# Output the profile if `--outfile` was set to a file.
if Scalene.__output.output_file:
Scalene.output_profile(sys.argv)
def disable_signals(retry: bool = True) -> None:
"""Turn off the profiling signals."""
if sys.platform == "win32":
Scalene.timer_signals = False
return
try:
assert Scalene.__signals.cpu_timer_signal is not None
Scalene.__orig_setitimer(Scalene.__signals.cpu_timer_signal, 0)
for sig in [
Scalene.__signals.malloc_signal,
Scalene.__signals.free_signal,
Scalene.__signals.memcpy_signal,
]:
Scalene.__orig_signal(sig, signal.SIG_IGN)
Scalene.stop_signal_queues()
except Exception:
# Retry just in case we get interrupted by one of our own signals.
if retry:
Scalene.disable_signals(retry=False)
def exit_handler() -> None:
"""When we exit, disable all signals."""
Scalene.disable_signals()
# Delete the temporary directory.
with contextlib.suppress(Exception):
if not Scalene.__pid:
Scalene.__python_alias_dir.cleanup() # type: ignore
with contextlib.suppress(Exception):
os.remove(f"/tmp/scalene-malloc-lock{os.getpid()}") | When we exit, disable all signals. | exit_handler | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def profile_code(
self,
code: str,
the_globals: Dict[str, str],
the_locals: Dict[str, str],
left: List[str],
) -> int:
"""Initiate execution and profiling."""
if Scalene.__args.memory:
from scalene import pywhere # type: ignore
pywhere.populate_struct()
# If --off is set, tell all children to not profile and stop profiling before we even start.
if "off" not in Scalene.__args or not Scalene.__args.off:
self.start()
# Run the code being profiled.
exit_status = 0
try:
exec(code, the_globals, the_locals)
except SystemExit as se:
# Intercept sys.exit and propagate the error code.
exit_status = se.code if isinstance(se.code, int) else 1
except KeyboardInterrupt:
# Cleanly handle keyboard interrupts (quits execution and dumps the profile).
print("Scalene execution interrupted.", file=sys.stderr)
except Exception as e:
print(f"{Scalene.__error_message}:\n", e, file=sys.stderr)
traceback.print_exc()
exit_status = 1
finally:
self.stop()
if Scalene.__args.memory:
pywhere.disable_settrace()
pywhere.depopulate_struct()
# Leaving here in case of reversion
# sys.settrace(None)
stats = Scalene.__stats
(last_file, last_line, _) = Scalene.last_profiled_tuple()
stats.memory_malloc_count[last_file][last_line] += 1
stats.memory_aggregate_footprint[last_file][
last_line
] += stats.memory_current_highwater_mark[last_file][last_line]
# If we've collected any samples, dump them.
did_output = Scalene.output_profile(left)
if not did_output:
print(
"Scalene: The specified code did not run for long enough to profile.",
file=sys.stderr,
)
# Print out hints to explain why the above message may have been printed.
if not Scalene.__args.profile_all:
# if --profile-all was not specified, suggest it
# as a way to profile otherwise excluded code
# (notably Python libraries, which are excluded by
# default).
print(
"By default, Scalene only profiles code in the file executed and its subdirectories.",
file=sys.stderr,
)
print(
"To track the time spent in all files, use the `--profile-all` option.",
file=sys.stderr,
)
elif (
Scalene.__args.profile_only
or Scalene.__args.profile_exclude
):
# if --profile-only or --profile-exclude were
# specified, suggest that the patterns might be
# excluding too many files. Collecting the
# previously filtered out files could allow
# suggested fixes (as in, remove foo because it
# matches too many files).
print(
"The patterns used in `--profile-only` or `--profile-exclude` may be filtering out too many files.",
file=sys.stderr,
)
else:
# if none of the above cases hold, indicate that
# Scalene can only profile code that runs for at
# least one second or allocates some threshold
# amount of memory.
print(
"Scalene can only profile code that runs for at least one second or allocates at least 10MB.",
file=sys.stderr,
)
if not (
did_output
and Scalene.__args.web
and not Scalene.__args.cli
and not Scalene.__is_child
):
return exit_status
if Scalene.__args.web or Scalene.__args.html:
profile_filename = Scalene.__profile_filename
if Scalene.__args.outfile:
profile_filename = os.path.join(
os.path.dirname(Scalene.__args.outfile),
os.path.splitext(os.path.basename(Scalene.__args.outfile))[0] + ".json"
)
generate_html(
profile_fname=profile_filename,
output_fname=(
Scalene.__profiler_html if not Scalene.__args.outfile
else Scalene.__args.outfile
),
)
if Scalene.in_jupyter():
from scalene.scalene_jupyter import ScaleneJupyter
port = ScaleneJupyter.find_available_port(8181, 9000)
if not port:
print(
"Scalene error: could not find an available port.",
file=sys.stderr,
)
else:
ScaleneJupyter.display_profile(
port, Scalene.__profiler_html
)
else:
if not Scalene.__args.no_browser:
# Remove any interposition libraries from the environment before opening the browser.
# See also scalene/scalene_preload.py
old_dyld = os.environ.pop("DYLD_INSERT_LIBRARIES", "")
old_ld = os.environ.pop("LD_PRELOAD", "")
output_fname = (
f"{os.getcwd()}{os.sep}{Scalene.__profiler_html}"
)
if Scalene.__pid == 0:
# Only open a browser tab for the parent.
dir = os.path.dirname(__file__)
subprocess.Popen(
[
Scalene.__orig_python,
f"{dir}{os.sep}launchbrowser.py",
output_fname,
str(scalene.scalene_config.SCALENE_PORT),
],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
# Restore them.
os.environ.update(
{
"DYLD_INSERT_LIBRARIES": old_dyld,
"LD_PRELOAD": old_ld,
}
)
return exit_status
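A sketch of the execution step at the heart of profile_code: compile the target's source under its own filename and exec it against __main__-style globals, with __file__ and __spec__ spliced so the program sees itself rather than the profiler. The path below is hypothetical.

import textwrap

prog_name = "example_prog.py"            # hypothetical path of the profiled script
src = textwrap.dedent("""
    print("running as", __file__)
""")
code = compile(src, prog_name, "exec")
the_globals = {"__name__": "__main__", "__file__": prog_name, "__spec__": None}
exec(code, the_globals, the_globals)     # prints: running as example_prog.py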
def process_args(args: argparse.Namespace) -> None:
"""Process all arguments."""
Scalene.__args = ScaleneArguments(**vars(args))
Scalene.__next_output_time = (
time.perf_counter() + Scalene.__args.profile_interval
)
Scalene.__output.html = Scalene.__args.html
if Scalene.__args.outfile:
Scalene.__output.output_file = os.path.abspath(
os.path.expanduser(Scalene.__args.outfile)
)
Scalene.__is_child = Scalene.__args.pid != 0
# the pid of the primary profiler
Scalene.__parent_pid = Scalene.__args.pid if Scalene.__is_child else os.getpid()
# Don't profile the GPU if not enabled (i.e., either no options or --cpu and/or --memory, but no --gpu).
if not Scalene.__args.gpu:
Scalene.__output.gpu = False
Scalene.__json.gpu = False
def set_initialized() -> None:
"""Indicate that Scalene has been initialized and is ready to begin profiling."""
Scalene.__initialized = True
def main() -> None:
"""Initialize and profile."""
(
args,
left,
) = ScaleneParseArgs.parse_args()
# Try to profile an accelerator if one is found and `--gpu` is selected / it's the default (see ScaleneArguments).
if args.gpu:
if platform.system() == "Darwin":
from scalene.scalene_apple_gpu import ScaleneAppleGPU
Scalene.__accelerator = ScaleneAppleGPU()
else:
from scalene.scalene_nvidia_gpu import ScaleneNVIDIAGPU
Scalene.__accelerator = ScaleneNVIDIAGPU()
if not Scalene.__accelerator.has_gpu():
# Failover to try Neuron
from scalene.scalene_neuron import ScaleneNeuron
Scalene.__accelerator = ScaleneNeuron()
Scalene.__output.gpu = Scalene.__accelerator.has_gpu()
Scalene.__json.gpu = Scalene.__output.gpu
Scalene.__json.gpu_device = Scalene.__accelerator.gpu_device()
else:
Scalene.__accelerator = None
Scalene.__output.gpu = False
Scalene.__json.gpu = False
Scalene.__json.gpu_device = ""
Scalene.set_initialized()
Scalene.run_profiler(args, left)
def register_files_to_profile() -> None:
"""Tells the pywhere module, which tracks memory, which files to profile."""
from scalene import pywhere # type: ignore
profile_only_list = Scalene.__args.profile_only.split(",")
pywhere.register_files_to_profile(
list(Scalene.__files_to_profile) + profile_only_list,
Scalene.__program_path,
Scalene.__args.profile_all,
)
def run_profiler(
args: argparse.Namespace, left: List[str], is_jupyter: bool = False
) -> None:
"""Set up and initiate profiling."""
# Set up signal handlers for starting and stopping profiling.
if is_jupyter:
Scalene.set_in_jupyter()
if not Scalene.__initialized:
print(
"ERROR: Do not try to manually invoke `run_profiler`.\n"
"To invoke Scalene programmatically, see the usage noted in https://github.com/plasma-umass/scalene#using-scalene",
file=sys.stderr,
)
sys.exit(1)
if sys.platform != "win32":
for sig, handler in [
(
Scalene.__signals.start_profiling_signal,
Scalene.start_signal_handler,
),
(
Scalene.__signals.stop_profiling_signal,
Scalene.stop_signal_handler,
),
]:
Scalene.__orig_signal(sig, handler)
Scalene.__orig_siginterrupt(sig, False)
Scalene.__orig_signal(signal.SIGINT, Scalene.interruption_handler)
did_preload = (
False if is_jupyter else ScalenePreload.setup_preload(args)
)
if not did_preload:
with contextlib.suppress(Exception):
# If running in the background, print the PID.
if os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()):
# In the background.
print(
f"Scalene now profiling process {os.getpid()}",
file=sys.stderr,
)
print(
f" to disable profiling: python3 -m scalene.profile --off --pid {os.getpid()}",
file=sys.stderr,
)
print(
f" to resume profiling: python3 -m scalene.profile --on --pid {os.getpid()}",
file=sys.stderr,
)
Scalene.__stats.clear_all()
sys.argv = left
with contextlib.suppress(Exception):
if not is_jupyter:
multiprocessing.set_start_method("fork")
spec = None
try:
Scalene.process_args(args)
progs = None
exit_status = 0
try:
# Handle direct invocation of a string by executing the string and returning.
if len(sys.argv) >= 2 and sys.argv[0] == "-c":
try:
exec(sys.argv[1])
except SyntaxError:
traceback.print_exc()
sys.exit(1)
sys.exit(0)
if len(sys.argv) >= 2 and sys.argv[0] == "-m":
module = True
# Remove -m and the provided module name
_, mod_name, *sys.argv = sys.argv
# Given `some.module`, find the path of the corresponding
# some/module/__main__.py or some/module.py file to run.
_, spec, _ = _get_module_details(mod_name)
if not spec.origin:
raise FileNotFoundError
# Prepend the found .py file to arguments
sys.argv.insert(0, spec.origin)
else:
module = False
# Look for something ending in '.py'. Treat the first one as our executable.
progs = [x for x in sys.argv if re.match(r".*\.py$", x)]
# Just in case that didn't work, try sys.argv[0] and __file__.
with contextlib.suppress(Exception):
progs.extend((sys.argv[0], __file__))
if not progs:
raise FileNotFoundError
# Use the full absolute path of the program being profiled, expanding ~ if need be.
prog_name = os.path.abspath(os.path.expanduser(progs[0]))
with open(
prog_name, "r", encoding="utf-8"
) as prog_being_profiled:
# Read in the code and compile it.
code: Any = ""
try:
code = compile(
prog_being_profiled.read(),
prog_name,
"exec",
)
except SyntaxError:
traceback.print_exc()
sys.exit(1)
# Push the program's path.
program_path = Filename(os.path.dirname(prog_name))
if not module:
sys.path.insert(0, program_path)
# NOTE: Python, in its standard mode of operation,
# places the root of the module tree at the directory of
# the entrypoint script. This is different in how things
# work with the `-m` mode of operation, so for now we do not
# surface this in Scalene
#
# TODO: Add in entrypoint_dir logic for `-m` operation
Scalene.__entrypoint_dir = program_path
# If a program path was specified at the command-line, use it.
if len(Scalene.__args.program_path) > 0:
Scalene.__program_path = Filename(
os.path.abspath(args.program_path)
)
else:
# Otherwise, use the invoked directory.
Scalene.__program_path = program_path
# Grab local and global variables.
if Scalene.__args.memory:
Scalene.register_files_to_profile()
import __main__
the_locals = __main__.__dict__
the_globals = __main__.__dict__
# Splice in the name of the file being executed instead of the profiler.
the_globals["__file__"] = prog_name
# This part works because of the order in which Python attempts to resolve names--
# Within a given context, it first tries to look for __package__, and then for __spec__.
# __spec__ is a ModuleSpec object that carries a lot of extra machinery and requires
# extra effort to create (it seems, at least).
#
# __spec__ was originally set to none because the __globals__ here has the Scalene ModuleSpec
# but it doesn't seem like that was enough. Setting the __package__, as below, seems to be enough to make
# it look in the right place
the_globals["__spec__"] = None
if spec is not None:
name = spec.name
the_globals["__package__"] = name.split(".")[0]
# Do a GC before we start.
gc.collect()
# Start the profiler.
profiler = Scalene(args, Filename(prog_name))
try:
# We exit with this status (returning error code as appropriate).
exit_status = profiler.profile_code(
code, the_locals, the_globals, left
)
if not is_jupyter:
sys.exit(exit_status)
except StopJupyterExecution:
# Running in Jupyter notebooks
pass
except AttributeError:
# don't let the handler below mask programming errors
raise
except Exception as ex:
template = "Scalene: An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
except (FileNotFoundError, IOError):
if progs:
print(
f"Scalene: could not find input file {prog_name}",
file=sys.stderr,
)
else:
print("Scalene: no input file specified.", file=sys.stderr)
sys.exit(1)
except SystemExit as e:
exit_status = e.code if isinstance(e.code, int) else 1
except StopJupyterExecution:
pass
except Exception:
print(
"Scalene failed to initialize.\n" + traceback.format_exc(),
file=sys.stderr,
)
sys.exit(1)
finally:
with contextlib.suppress(Exception):
for mapfile in [
Scalene.__malloc_mapfile,
Scalene.__memcpy_mapfile,
]:
mapfile.close()
if not Scalene.__is_child:
mapfile.cleanup()
if not is_jupyter:
sys.exit(exit_status)
def is_call_function(code: CodeType, bytei: ByteCodeIndex) -> bool:
"""Returns true iff the bytecode at the given index is a function call."""
return any(
(
ins.offset == bytei
and ins.opcode in ScaleneFuncUtils.__call_opcodes
)
for ins in dis.get_instructions(code)
)
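A sketch of the same idea using opcode names in place of the private __call_opcodes set; the name set below is a simplified stand-in and the actual call opcodes vary across CPython versions.

import dis

CALL_OPNAMES = {"CALL", "CALL_FUNCTION", "CALL_METHOD", "CALL_FUNCTION_KW", "PRECALL"}

def is_call_at(code, offset: int) -> bool:
    return any(ins.offset == offset and ins.opname in CALL_OPNAMES
               for ins in dis.get_instructions(code))

def f():
    return len("abc")

call_offsets = [ins.offset for ins in dis.get_instructions(f.__code__)
                if ins.opname in CALL_OPNAMES]
print(bool(call_offsets) and all(is_call_at(f.__code__, o) for o in call_offsets))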
def clear(self) -> None:
"""Reset all statistics except for memory footprint."""
self.start_time = 0
self.elapsed_time = 0
self.alloc_samples = 0
self.stacks.clear()
self.cpu_samples_python.clear()
self.cpu_samples_c.clear()
self.cpu_utilization.clear()
self.core_utilization.clear()
self.cpu_samples.clear()
self.gpu_samples.clear()
self.malloc_samples.clear()
self.memory_malloc_samples.clear()
self.memory_malloc_count.clear()
self.memory_current_footprint.clear()
self.memory_max_footprint.clear()
self.memory_current_highwater_mark.clear()
self.memory_aggregate_footprint.clear()
self.memory_python_samples.clear()
self.memory_free_samples.clear()
self.memory_free_count.clear()
self.memcpy_samples.clear()
self.total_cpu_samples = 0.0
self.total_gpu_samples = 0.0
self.n_gpu_samples.clear()
self.total_memory_malloc_samples = 0.0
self.total_memory_free_samples = 0.0
self.current_footprint = 0.0
self.leak_score.clear()
self.last_malloc_triggered = (
Filename(""),
LineNumber(0),
Address("0x0"),
)
self.allocation_velocity = (0.0, 0.0)
self.per_line_footprint_samples.clear()
self.bytei_map.clear()
def clear_all(self) -> None:
"""Clear all statistics."""
self.clear()
self.current_footprint = 0
self.max_footprint = 0
self.max_footprint_loc = None
self.per_line_footprint_samples.clear()
def start_clock(self) -> None:
"""Start the timer."""
self.start_time = time.time()
def stop_clock(self) -> None:
"""Stop the timer."""
if self.start_time > 0:
self.elapsed_time += time.time() - self.start_time
self.start_time = 0
def build_function_stats(self, filename: Filename) -> ScaleneStatistics:
"""Produce aggregated statistics for each function."""
fn_stats = ScaleneStatistics()
fn_stats.elapsed_time = self.elapsed_time
fn_stats.total_cpu_samples = self.total_cpu_samples
fn_stats.total_gpu_samples = self.total_gpu_samples
fn_stats.n_gpu_samples = self.n_gpu_samples
fn_stats.total_memory_malloc_samples = self.total_memory_malloc_samples
first_line_no = LineNumber(1)
fn_stats.function_map = self.function_map
fn_stats.firstline_map = self.firstline_map
for line_no in self.function_map[filename]:
fn_name = self.function_map[filename][line_no]
if fn_name == "<module>":
continue
fn_stats.cpu_samples_c[fn_name][
first_line_no
] += self.cpu_samples_c[filename][line_no]
fn_stats.cpu_samples_python[fn_name][
first_line_no
] += self.cpu_samples_python[filename][line_no]
fn_stats.gpu_samples[fn_name][first_line_no] += self.gpu_samples[
filename
][line_no]
fn_stats.n_gpu_samples[fn_name][
first_line_no
] += self.n_gpu_samples[filename][line_no]
fn_stats.gpu_mem_samples[fn_name][
first_line_no
] += self.gpu_mem_samples[filename][line_no]
fn_stats.cpu_utilization[fn_name][
first_line_no
] += self.cpu_utilization[filename][line_no]
fn_stats.core_utilization[fn_name][
first_line_no
] += self.core_utilization[filename][line_no]
fn_stats.per_line_footprint_samples[fn_name][
first_line_no
] += self.per_line_footprint_samples[filename][line_no]
fn_stats.memory_malloc_count[fn_name][
first_line_no
] += self.memory_malloc_count[filename][line_no]
fn_stats.memory_free_count[fn_name][
first_line_no
] += self.memory_free_count[filename][line_no]
fn_stats.memory_malloc_samples[fn_name][
first_line_no
] += self.memory_malloc_samples[filename][line_no]
fn_stats.memory_python_samples[fn_name][
first_line_no
] += self.memory_python_samples[filename][line_no]
fn_stats.memory_free_samples[fn_name][
first_line_no
] += self.memory_free_samples[filename][line_no]
for index in self.bytei_map[filename][line_no]:
fn_stats.bytei_map[fn_name][first_line_no].add(
ByteCodeIndex(index) # was 0
)
fn_stats.memcpy_samples[fn_name][
first_line_no
] += self.memcpy_samples[filename][line_no]
fn_stats.leak_score[fn_name][first_line_no] = (
fn_stats.leak_score[fn_name][first_line_no][0]
+ self.leak_score[filename][line_no][0],
fn_stats.leak_score[fn_name][first_line_no][1]
+ self.leak_score[filename][line_no][1],
)
fn_stats.memory_max_footprint[fn_name][first_line_no] = max(
fn_stats.memory_max_footprint[fn_name][first_line_no],
self.memory_max_footprint[filename][line_no],
)
fn_stats.memory_aggregate_footprint[fn_name][
first_line_no
] += self.memory_aggregate_footprint[filename][line_no]
return fn_stats | Produce aggregated statistics for each function. | build_function_stats | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def output_stats(self, pid: int, dir_name: pathlib.Path) -> None:
"""Output statistics for a particular process to a given directory."""
payload: List[Any] = [
getattr(self, n) for n in ScaleneStatistics.payload_contents
]
# Create a file in the Python alias directory with the relevant info.
out_filename = os.path.join(
dir_name, f"scalene{pid}-{str(os.getpid())}"
)
with open(out_filename, "wb") as out_file:
cloudpickle.dump(payload, out_file) | Output statistics for a particular process to a given directory. | output_stats | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def increment_per_line_samples(
dest: Dict[Filename, Dict[LineNumber, T]],
src: Dict[Filename, Dict[LineNumber, T]],
) -> None:
"""Increment single-line dest samples by their value in src."""
for filename in src:
for lineno in src[filename]:
v = src[filename][lineno]
dest[filename][lineno] += v # type: ignore | Increment single-line dest samples by their value in src. | increment_per_line_samples | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def increment_cpu_utilization(
dest: Dict[Filename, Dict[LineNumber, RunningStats]],
src: Dict[Filename, Dict[LineNumber, RunningStats]],
) -> None:
"""Increment CPU utilization."""
for filename in src:
for lineno in src[filename]:
dest[filename][lineno] += src[filename][lineno] | Increment CPU utilization. | increment_cpu_utilization | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def increment_core_utilization(
dest: Dict[Filename, Dict[LineNumber, RunningStats]],
src: Dict[Filename, Dict[LineNumber, RunningStats]],
) -> None:
"""Increment core utilization."""
for filename in src:
for lineno in src[filename]:
dest[filename][lineno] += src[filename][lineno] | Increment core utilization. | increment_core_utilization | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def merge_stats(self, the_dir_name: pathlib.Path) -> None:
"""Merge all statistics in a given directory."""
the_dir = pathlib.Path(the_dir_name)
for f in list(the_dir.glob(os.path.join("**", "scalene*"))):
# Skip empty files.
if os.path.getsize(f) == 0:
continue
with open(f, "rb") as file:
unpickler = pickle.Unpickler(file)
try:
value = unpickler.load()
except EOFError:
# Empty file for some reason.
continue
x = ScaleneStatistics()
for i, n in enumerate(ScaleneStatistics.payload_contents):
setattr(x, n, value[i])
if x.max_footprint > self.max_footprint:
self.max_footprint = x.max_footprint
self.max_footprint_loc = x.max_footprint_loc
self.current_footprint = max(
self.current_footprint, x.current_footprint
)
self.increment_cpu_utilization(
self.cpu_utilization, x.cpu_utilization
)
self.increment_core_utilization(
self.core_utilization, x.core_utilization
)
self.elapsed_time = max(self.elapsed_time, x.elapsed_time)
self.alloc_samples += x.alloc_samples
self.stacks.update(x.stacks)
self.total_cpu_samples += x.total_cpu_samples
self.total_gpu_samples += x.total_gpu_samples
self.increment_per_line_samples(
self.cpu_samples_c, x.cpu_samples_c
)
self.increment_per_line_samples(
self.cpu_samples_python, x.cpu_samples_python
)
self.increment_per_line_samples(
self.gpu_samples, x.gpu_samples
)
self.increment_per_line_samples(
self.n_gpu_samples, x.n_gpu_samples
)
self.increment_per_line_samples(
self.gpu_mem_samples, x.gpu_mem_samples
)
self.increment_per_line_samples(
self.memcpy_samples, x.memcpy_samples
)
self.increment_per_line_samples(
self.per_line_footprint_samples,
x.per_line_footprint_samples,
)
# Sorting each of the per_line_footprint_sample lists by time, since per_line_footprint_samples
# is sent between processes. Samples are in the format [time, footprint]
for filename in self.per_line_footprint_samples:
for lineno in self.per_line_footprint_samples[filename]:
self.per_line_footprint_samples[filename][lineno].sort(
key=lambda x: x[0]
)
self.increment_per_line_samples(
self.memory_malloc_count, x.memory_malloc_count
)
self.increment_per_line_samples(
self.memory_malloc_samples, x.memory_malloc_samples
)
self.increment_per_line_samples(
self.memory_python_samples, x.memory_python_samples
)
self.increment_per_line_samples(
self.memory_free_samples, x.memory_free_samples
)
self.increment_per_line_samples(
self.memory_free_count, x.memory_free_count
)
for filename in x.bytei_map:
for lineno in x.bytei_map[filename]:
v = x.bytei_map[filename][lineno]
self.bytei_map[filename][lineno] |= v
self.memory_max_footprint[filename][lineno] = max(
self.memory_max_footprint[filename][lineno],
x.memory_max_footprint[filename][lineno],
)
for filename in x.cpu_samples:
self.cpu_samples[filename] += x.cpu_samples[filename]
self.total_memory_free_samples += x.total_memory_free_samples
self.total_memory_malloc_samples += (
x.total_memory_malloc_samples
)
self.memory_footprint_samples += x.memory_footprint_samples
# Sorting footprint samples by time when sample was taken.
# Samples are in the format [time, footprint]
self.memory_footprint_samples.sort(key=lambda x: x[0])
for k, val in x.function_map.items():
if k in self.function_map:
self.function_map[k].update(val)
else:
self.function_map[k] = val
self.firstline_map.update(x.firstline_map)
os.remove(f) | Merge all statistics in a given directory. | merge_stats | python | plasma-umass/scalene | scalene/scalene_statistics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_statistics.py | Apache-2.0 |
def _set_accounting_mode(self) -> bool:
"""Returns true iff the accounting mode was set already for all GPUs or is now set."""
ngpus = self.__ngpus
for i in range(ngpus):
# Check if each GPU has accounting mode set.
h = self.__handle[i]
if (
pynvml.nvmlDeviceGetAccountingMode(h)
!= pynvml.NVML_FEATURE_ENABLED
):
# If not, try to set it. As a side effect, we turn persistence mode on
# so the driver is not unloaded (which undoes the accounting mode setting).
try:
pynvml.nvmlDeviceSetPersistenceMode(
h, pynvml.NVML_FEATURE_ENABLED
)
pynvml.nvmlDeviceSetAccountingMode(
h, pynvml.NVML_FEATURE_ENABLED
)
except pynvml.NVMLError:
# We don't have sufficient permissions.
return False
return True | Returns true iff the accounting mode was set already for all GPUs or is now set. | _set_accounting_mode | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
def gpu_utilization(self, pid: int) -> float:
"""Return overall GPU utilization by pid if possible.
Otherwise, returns aggregate utilization across all running processes.
"""
if not self.has_gpu():
return 0
ngpus = self.__ngpus
accounting_on = self.__has_per_pid_accounting
utilization = 0
for i in range(ngpus):
h = self.__handle[i]
if accounting_on:
with contextlib.suppress(Exception):
utilization += pynvml.nvmlDeviceGetAccountingStats(
h, pid
).gpuUtilization
else:
try:
utilization += pynvml.nvmlDeviceGetUtilizationRates(h).gpu
except pynvml.NVMLError:
# Silently ignore NVML errors. "Fixes" https://github.com/plasma-umass/scalene/issues/471.
pass
return (utilization / ngpus) / 100.0 | Return overall GPU utilization by pid if possible.
Otherwise, returns aggregate utilization across all running processes. | gpu_utilization | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
def has_gpu(self) -> bool:
"""True iff the system has a detected GPU."""
return self.__has_gpu | True iff the system has a detected GPU. | has_gpu | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
def reinit(self) -> None:
"""Reinitialize the nvidia wrapper."""
if not self.has_gpu():
return
self.__handle = []
with contextlib.suppress(Exception):
pynvml.nvmlInit()
self.__ngpus = pynvml.nvmlDeviceGetCount()
self.__handle.extend(
pynvml.nvmlDeviceGetHandleByIndex(i)
for i in range(self.__ngpus)
) | Reinitialize the nvidia wrapper. | reinit | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
def gpu_memory_usage(self, pid: int) -> float:
"""Returns GPU memory used by the process pid, in MB."""
# Adapted from https://github.com/gpuopenanalytics/pynvml/issues/21#issuecomment-678808658
if not self.has_gpu():
return 0
total_used_GPU_memory = 0
for i in range(self.__ngpus):
handle = self.__handle[i]
with contextlib.suppress(Exception):
for proc in pynvml.nvmlDeviceGetComputeRunningProcesses(
handle
):
# Only accumulate memory stats for the current pid.
if proc.usedGpuMemory and proc.pid == pid:
# First check is to protect against return of None
# from incompatible NVIDIA drivers.
total_used_GPU_memory += proc.usedGpuMemory / 1048576
return total_used_GPU_memory | Returns GPU memory used by the process pid, in MB. | gpu_memory_usage | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
def get_stats(self) -> Tuple[float, float]:
"""Returns a tuple of (utilization %, memory in use)."""
if self.has_gpu():
total_load = self.gpu_utilization(self.__pid)
mem_used = self.gpu_memory_usage(self.__pid)
return (total_load, mem_used)
return (0.0, 0.0) | Returns a tuple of (utilization %, memory in use). | get_stats | python | plasma-umass/scalene | scalene/scalene_nvidia_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_nvidia_gpu.py | Apache-2.0 |
def reset(self) -> None:
"""Reset the timer."""
self.seconds = 0.0
self.interval = 0.0
self.is_set = False | Reset the timer. | reset | python | plasma-umass/scalene | scalene/scalene_client_timer.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_client_timer.py | Apache-2.0 |
def get_itimer(self) -> Tuple[float, float]:
"""Returns a tuple of (seconds, interval)."""
return self.seconds, self.interval | Returns a tuple of (seconds, interval). | get_itimer | python | plasma-umass/scalene | scalene/scalene_client_timer.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_client_timer.py | Apache-2.0 |
def yield_next_delay(self, elapsed: float) -> Tuple[bool, float]:
"""
Updates remaining_interval or remaining_seconds, returning whether
the timer signal should be passed up to the client and
the next delay. If the second return <= 0, then
there is no interval and the delay has elapsed.
"""
if self.delay_elapsed:
self.remaining_interval -= elapsed
is_done = self.remaining_interval <= 0
if is_done:
self.remaining_interval = self.interval
return is_done, self.remaining_interval
self.remaining_seconds -= elapsed
is_done = self.remaining_seconds <= 0
if is_done:
self.delay_elapsed = True
return (
is_done,
self.remaining_interval if is_done else self.remaining_seconds,
) | Updates remaining_interval or remaining_seconds, returning whether
the timer signal should be passed up to the client and
the next delay. If the second return <= 0, then
there is no interval and the delay has elapsed. | yield_next_delay | python | plasma-umass/scalene | scalene/scalene_client_timer.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_client_timer.py | Apache-2.0 |
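For illustration, a minimal sketch of how yield_next_delay behaves once the timer fields above are populated. The field values are made up, and constructing ScaleneClientTimer with a bare default constructor plus manual attribute assignment is an assumption for demonstration, not the class's documented usage.

from scalene.scalene_client_timer import ScaleneClientTimer  # module path taken from the row above

timer = ScaleneClientTimer()          # assumed: no required constructor arguments
timer.seconds, timer.interval = 0.3, 0.1
timer.remaining_seconds, timer.remaining_interval = 0.3, 0.1
timer.delay_elapsed = False
timer.is_set = True

print(timer.yield_next_delay(0.20))   # (False, 0.1)  -> 0.1 s of the initial delay still remains
print(timer.yield_next_delay(0.15))   # (True, 0.1)   -> delay elapsed; timer switches to the interval
print(timer.yield_next_delay(0.05))   # (False, 0.05) -> 0.05 s of the current interval remains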
def replacement_lock(scalene: Scalene) -> None:
class ReplacementLock:
"""Replace lock with a version that periodically yields and updates sleeping status."""
def __init__(self) -> None:
# Cache the original lock (which we replace)
# print("INITIALIZING LOCK")
self.__lock: threading.Lock = scalene.get_original_lock()
def acquire(self, blocking: bool = True, timeout: float = -1) -> bool:
tident = threading.get_ident()
if blocking == 0:
blocking = False
start_time = time.perf_counter()
if blocking:
if timeout < 0:
interval = sys.getswitchinterval()
else:
interval = min(timeout, sys.getswitchinterval())
else:
interval = -1
while True:
scalene.set_thread_sleeping(tident)
acquired_lock = self.__lock.acquire(blocking, interval)
scalene.reset_thread_sleeping(tident)
if acquired_lock:
return True
if not blocking:
return False
# If a timeout was specified, check to see if it's expired.
if timeout != -1:
end_time = time.perf_counter()
if end_time - start_time >= timeout:
return False
def release(self) -> None:
self.__lock.release()
def locked(self) -> bool:
return self.__lock.locked()
def _at_fork_reinit(self) -> None:
try:
self.__lock._at_fork_reinit() # type: ignore
except AttributeError:
pass
def __enter__(self) -> None:
self.acquire()
def __exit__(self, type: str, value: str, traceback: Any) -> None:
self.release()
threading.Lock = ReplacementLock # type: ignore | Replace lock with a version that periodically yields and updates sleeping status. | replacement_lock | python | plasma-umass/scalene | scalene/replacement_lock.py | https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_lock.py | Apache-2.0 |
def reinit(self) -> None:
"""Here for compatibility with ScaleneGPU."""
pass | Here for compatibility with ScaleneGPU. | reinit | python | plasma-umass/scalene | scalene/scalene_neuron.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_neuron.py | Apache-2.0 |
def test_get_native_imported_modules(cleanup_imports):
# Mock the is_native method to control which modules are considered native
with patch.object(ScaleneAnalysis, 'is_native', return_value=True):
source_code = """
import math
import os
from sys import path
"""
expected_imports = ['import math', 'import os', 'from sys import path']
actual_imports = ScaleneAnalysis.get_native_imported_modules(source_code)
assert set(actual_imports) == set(expected_imports), "The list of native imports does not match the expected list."
with patch.object(ScaleneAnalysis, 'is_native', return_value=False):
source_code = """
import math
import os
from sys import path
"""
expected_imports = []
actual_imports = ScaleneAnalysis.get_native_imported_modules(source_code)
assert actual_imports == expected_imports, "The list of native imports should be empty." | expected_imports = ['import math', 'import os', 'from sys import path']
actual_imports = ScaleneAnalysis.get_native_imported_modules(source_code)
assert set(actual_imports) == set(expected_imports), "The list of native imports does not match the expected list."
with patch.object(ScaleneAnalysis, 'is_native', return_value=False):
source_code = | test_get_native_imported_modules | python | plasma-umass/scalene | tests/test_coverup_1.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_1.py | Apache-2.0 |
def put(self, item: Optional[T]) -> None:
"""Add an item to the queue."""
self.queue.put(item) | Add an item to the queue. | put | python | plasma-umass/scalene | tests/test_coverup_2.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_2.py | Apache-2.0 |
def get(self) -> Optional[T]:
"""Get one item from the queue."""
return self.queue.get() | Get one item from the queue. | get | python | plasma-umass/scalene | tests/test_coverup_2.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_2.py | Apache-2.0 |
def start(self) -> None:
"""Start processing."""
# We use a daemon thread to defensively avoid hanging if we never join with it
if not self.thread:
self.thread = threading.Thread(target=self.run, daemon=True)
self.thread.start() | Start processing. | start | python | plasma-umass/scalene | tests/test_coverup_2.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_2.py | Apache-2.0 |
def stop(self) -> None:
"""Stop processing."""
if self.thread:
self.queue.put(None)
# We need to join all threads before a fork() to avoid an inconsistent
# state, locked mutexes, etc.
self.thread.join()
self.thread = None | Stop processing. | stop | python | plasma-umass/scalene | tests/test_coverup_2.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_2.py | Apache-2.0 |
def run(self) -> None:
"""Run the function processing items until stop is called.
Executed in a separate thread."""
while True:
item = self.queue.get()
if item is None: # None => stop request
break
with self.lock:
self.process(*item) | Run the function processing items until stop is called.
Executed in a separate thread. | run | python | plasma-umass/scalene | tests/test_coverup_2.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_2.py | Apache-2.0 |
def free_port():
"""Find a free port for testing."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('', 0))
return s.getsockname()[1] | Find a free port for testing. | free_port | python | plasma-umass/scalene | tests/test_coverup_72.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_72.py | Apache-2.0 |
def occupied_port():
"""Create and occupy a port for testing."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
s.listen(1)
yield port
s.close() | Create and occupy a port for testing. | occupied_port | python | plasma-umass/scalene | tests/test_coverup_72.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_72.py | Apache-2.0 |
def test_is_port_available_with_free_port(free_port):
"""Test that is_port_available returns True for a free port."""
assert is_port_available(free_port) == True | Test that is_port_available returns True for a free port. | test_is_port_available_with_free_port | python | plasma-umass/scalene | tests/test_coverup_72.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_72.py | Apache-2.0 |
def test_is_port_available_with_occupied_port(occupied_port):
"""Test that is_port_available returns False for an occupied port."""
assert is_port_available(occupied_port) == False | Test that is_port_available returns False for an occupied port. | test_is_port_available_with_occupied_port | python | plasma-umass/scalene | tests/test_coverup_72.py | https://github.com/plasma-umass/scalene/blob/master/tests/test_coverup_72.py | Apache-2.0 |
def calculate_z_serial_purepython(maxiter, zs, cs):
"""Calculate output list using Julia update rule"""
output = [0] * len(zs)
for i in range(len(zs)):
n = 0
z = zs[i]
c = cs[i]
while abs(z) < 2 and n < maxiter:
z = z * z + c
n += 1
output[i] = n
return output | Calculate output list using Julia update rule | calculate_z_serial_purepython | python | plasma-umass/scalene | benchmarks/julia1_nopil.py | https://github.com/plasma-umass/scalene/blob/master/benchmarks/julia1_nopil.py | Apache-2.0 |
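The pure-Python kernel above iterates point by point; a possible NumPy formulation of the same update rule is sketched below purely for comparison. It is not part of the benchmark, and numpy is an extra dependency introduced here.

import numpy as np

def calculate_z_numpy(maxiter, zs, cs):
    zs = np.array(zs, dtype=np.complex128)
    cs = np.array(cs, dtype=np.complex128)
    output = np.zeros(len(zs), dtype=np.int64)
    for _ in range(maxiter):
        mask = np.abs(zs) < 2        # points that would still pass the while-loop test
        if not mask.any():
            break
        zs[mask] = zs[mask] * zs[mask] + cs[mask]
        output[mask] += 1            # same count as n in the scalar version
    return output.tolist()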
def calc_pure_python(desired_width, max_iterations):
"""Create a list of complex coordinates (zs) and complex
parameters (cs), build Julia set, and display"""
x_step = (float(x2 - x1) / float(desired_width))
y_step = (float(y1 - y2) / float(desired_width))
x = []
y = []
ycoord = y2
while ycoord > y1:
y.append(ycoord)
ycoord += y_step
xcoord = x1
while xcoord < x2:
x.append(xcoord)
xcoord += x_step
# Build a list of coordinates and the initial condition for each cell.
# Note that our initial condition is a constant and could easily be removed;
# we use it to simulate a real-world scenario with several inputs to
# our function.
zs = []
cs = []
for ycoord in y:
for xcoord in x:
zs.append(complex(xcoord, ycoord))
cs.append(complex(c_real, c_imag))
print("Length of x:", len(x))
print("Total elements:", len(zs))
start_time = time.process_time()
output = calculate_z_serial_purepython(max_iterations, zs, cs)
end_time = time.process_time()
secs = end_time - start_time
sys.stdout.flush()
sys.stderr.flush()
output_str = "calculate_z_serial_purepython took " + str(secs) + " seconds"
print(output_str, file=sys.stderr)
sys.stderr.flush() | Create a list of complex coordinates (zs) and complex
parameters (cs), build Julia set, and display | calc_pure_python | python | plasma-umass/scalene | benchmarks/julia1_nopil.py | https://github.com/plasma-umass/scalene/blob/master/benchmarks/julia1_nopil.py | Apache-2.0 |
def __init__(self, D_in, H, D_out):
"""
In the constructor we construct three nn.Linear instances that we will use
in the forward pass.
"""
super(DynamicNet, self).__init__()
self.input_linear = torch.nn.Linear(D_in, H)
self.middle_linear = torch.nn.Linear(H, H)
self.output_linear = torch.nn.Linear(H, D_out) | In the constructor we construct three nn.Linear instances that we will use
in the forward pass. | __init__ | python | plasma-umass/scalene | test/testpyt.py | https://github.com/plasma-umass/scalene/blob/master/test/testpyt.py | Apache-2.0 |
def forward(self, x):
"""
For the forward pass of the model, we randomly choose either 0, 1, 2, or 3
and reuse the middle_linear Module that many times to compute hidden layer
representations.
Since each forward pass builds a dynamic computation graph, we can use normal
Python control-flow operators like loops or conditional statements when
defining the forward pass of the model.
Here we also see that it is perfectly safe to reuse the same Module many
times when defining a computational graph. This is a big improvement from Lua
Torch, where each Module could be used only once.
"""
h_relu = self.input_linear(x).clamp(min=0)
for _ in range(random.randint(0, 3)):
h_relu = self.middle_linear(h_relu).clamp(min=0)
y_pred = self.output_linear(h_relu)
return y_pred | For the forward pass of the model, we randomly choose either 0, 1, 2, or 3
and reuse the middle_linear Module that many times to compute hidden layer
representations.
Since each forward pass builds a dynamic computation graph, we can use normal
Python control-flow operators like loops or conditional statements when
defining the forward pass of the model.
Here we also see that it is perfectly safe to reuse the same Module many
times when defining a computational graph. This is a big improvement from Lua
Torch, where each Module could be used only once. | forward | python | plasma-umass/scalene | test/testpyt.py | https://github.com/plasma-umass/scalene/blob/master/test/testpyt.py | Apache-2.0 |
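A short usage sketch for the model above (the sizes are arbitrary); because forward reuses middle_linear a random number of times, repeated calls trace different dynamic graphs.

import torch

model = DynamicNet(D_in=64, H=100, D_out=10)
x = torch.randn(8, 64)
y_pred = model(x)          # one forward pass; 0-3 hidden-layer reuses chosen at random
print(y_pred.shape)        # torch.Size([8, 10])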
def total_size(o, handlers={}, verbose=False):
""" Returns the approximate memory footprint an object and all of its contents.
Automatically finds the contents of the following builtin containers and
their subclasses: tuple, list, deque, dict, set and frozenset.
To search other containers, add handlers to iterate over their contents:
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements}
"""
dict_handler = lambda d: chain.from_iterable(d.items())
all_handlers = {tuple: iter,
list: iter,
deque: iter,
dict: dict_handler,
set: iter,
frozenset: iter,
}
all_handlers.update(handlers) # user handlers take precedence
seen = set() # track which object id's have already been seen
default_size = getsizeof(0) # estimate sizeof object without __sizeof__
def sizeof(o):
if id(o) in seen: # do not double count the same object
return 0
seen.add(id(o))
s = getsizeof(o, default_size)
if verbose:
print(s, type(o), repr(o), file=stderr)
for typ, handler in all_handlers.items():
if isinstance(o, typ):
s += sum(map(sizeof, handler(o)))
break
return s
return sizeof(o) | Returns the approximate memory footprint an object and all of its contents.
Automatically finds the contents of the following builtin containers and
their subclasses: tuple, list, deque, dict, set and frozenset.
To search other containers, add handlers to iterate over their contents:
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements} | total_size | python | plasma-umass/scalene | test/test-size.py | https://github.com/plasma-umass/scalene/blob/master/test/test-size.py | Apache-2.0 |
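A small, illustrative call to total_size on a nested container; the exact byte counts depend on the Python build and platform.

d = {"xs": list(range(100)), "meta": ("julia", frozenset({1, 2, 3}))}
print(total_size(d))                # deep size in bytes, counting each object only once
print(total_size(d, verbose=True))  # additionally prints every object visited to stderr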
def topoSort(roots, getParents):
"""Return a topological sorting of nodes in a graph.
roots - list of root nodes to search from
getParents - function which returns the parents of a given node
"""
results = []
visited = set()
# Use iterative version to avoid stack limits for large datasets
stack = [(node, 0) for node in roots]
while stack:
current, state = stack.pop()
if state == 0:
# before recursing
if current not in visited:
visited.add(current)
stack.append((current, 1))
stack.extend((parent, 0) for parent in getParents(current))
else:
# after recursing
assert(current in visited)
results.append(current)
return results | Return a topological sorting of nodes in a graph.
roots - list of root nodes to search from
getParents - function which returns the parents of a given node | topoSort | python | plasma-umass/scalene | test/original/bm_mdp.py | https://github.com/plasma-umass/scalene/blob/master/test/original/bm_mdp.py | Apache-2.0 |
def topoSort(roots, getParents):
"""Return a topological sorting of nodes in a graph.
roots - list of root nodes to search from
getParents - function which returns the parents of a given node
"""
results = []
visited = set()
# Use iterative version to avoid stack limits for large datasets
stack = [(node, 0) for node in roots]
while stack:
current, state = stack.pop()
if state == 0:
# before recursing
if current not in visited:
visited.add(current)
stack.append((current, 1))
stack.extend((parent, 0) for parent in getParents(current))
else:
# after recursing
assert(current in visited)
results.append(current)
return results | Return a topological sorting of nodes in a graph.
roots - list of root nodes to search from
getParents - function which returns the parents of a given node | topoSort | python | plasma-umass/scalene | test/expensive_benchmarks/bm_mdp.py | https://github.com/plasma-umass/scalene/blob/master/test/expensive_benchmarks/bm_mdp.py | Apache-2.0 |
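A tiny illustration of topoSort on a hand-made dependency graph (the node names are invented): getParents returns what a node depends on, so dependencies come out before their dependents.

deps = {"app": ["lib", "config"], "lib": ["config"], "config": []}
order = topoSort(["app"], lambda node: deps[node])
print(order)   # ['config', 'lib', 'app']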
def fit(self, X, y, iterations=500, disp=-1):
"""Fit the model using the training data.
Arguments:
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Notes: This function must set member variables such that a subsequent call
to get_params or predict uses the learned parameters, overwriting
any parameter values previously set by calling set_params.
"""
n_features = X.shape[1]
x = np.random.rand(n_features + 1)
minimizer = x
fmin = self.objective(x, X, y)
for t in range(iterations):
if disp != -1 and t % disp == 0:
print("At iteration", t, "f(minimizer) =", fmin)
alpha = 0.002 / math.sqrt(t + 1)
subgrad = self.subgradient(x, X, y)
x -= alpha * subgrad
objective = self.objective(x, X, y)
if (objective < fmin):
fmin = objective
minimizer = x
self.w = minimizer[:-1]
self.b = minimizer[-1] | Fit the model using the training data.
Arguments:
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Notes: This function must set member variables such that a subsequent call
to get_params or predict uses the learned parameters, overwriting
any parameter values previously set by calling set_params. | fit | python | plasma-umass/scalene | test/automatic/svm/svm-original.py | https://github.com/plasma-umass/scalene/blob/master/test/automatic/svm/svm-original.py | Apache-2.0 |
def objective(self, wb, X, y):
"""Compute the objective function for the SVM.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
obj (float): value of the objective function evaluated on X and y.
"""
n_samples = X.shape[0]
w = wb[:-1]
b = wb[-1]
sum = 0
for n in range(n_samples):
sum += max(0, 1 - y[n] * (np.dot(X[n], w) + b))
return sum + self.lambda1 * LA.norm(w, 1) + self.lambda2 * (LA.norm(w, 2) ** 2) | Compute the objective function for the SVM.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
obj (float): value of the objective function evaluated on X and y. | objective | python | plasma-umass/scalene | test/automatic/svm/svm-original.py | https://github.com/plasma-umass/scalene/blob/master/test/automatic/svm/svm-original.py | Apache-2.0 |
def subgradient(self, wb, X, y):
"""Compute the subgradient of the objective function.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
subgrad (ndarray, shape = (n_features+1,)):
subgradient of the objective function with respect to
the coefficients wb=[w,b] of the linear model
"""
n_samples = X.shape[0]
n_features = X.shape[1]
w = wb[:-1]
b = wb[-1]
subgrad = np.zeros(n_features + 1)
for i in range(n_features):
for n in range(n_samples):
subgrad[i] += (- y[n] * X[n][i]) if y[n] * (np.dot(X[n], w) + b) < 1 else 0
subgrad[i] += self.lambda1 * (-1 if w[i] < 0 else 1) + 2 * self.lambda2 * w[i]
for n in range(n_samples):
subgrad[-1] += - y[n] if y[n] * (np.dot(X[n], w) + b) < 1 else 0
return subgrad | Compute the subgradient of the objective function.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
subgrad (ndarray, shape = (n_features+1,)):
subgradient of the objective function with respect to
the coefficients wb=[w,b] of the linear model | subgradient | python | plasma-umass/scalene | test/automatic/svm/svm-original.py | https://github.com/plasma-umass/scalene/blob/master/test/automatic/svm/svm-original.py | Apache-2.0 |
def fit(self, X, y, iterations=500, disp=-1):
"""Fit the model using the training data.
Arguments:
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Notes: This function must set member variables such that a subsequent call
to get_params or predict uses the learned parameters, overwriting
any parameter values previously set by calling set_params.
"""
n_features = X.shape[1]
x = np.random.rand(n_features + 1)
minimizer = x
fmin = self.objective(x, X, y)
for t in range(iterations):
if disp != -1 and t % disp == 0:
print("At iteration", t, "f(minimizer) =", fmin)
alpha = 0.002 / math.sqrt(t + 1)
subgrad = self.subgradient(x, X, y)
x -= alpha * subgrad
objective = self.objective(x, X, y)
if (objective < fmin):
fmin = objective
minimizer = x
self.w = minimizer[:-1]
self.b = minimizer[-1] | Fit the model using the training data.
Arguments:
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Notes: This function must set member variables such that a subsequent call
to get_params or predict uses the learned parameters, overwriting
any parameter values previously set by calling set_params. | fit | python | plasma-umass/scalene | test/automatic/svm/svm-optimized.py | https://github.com/plasma-umass/scalene/blob/master/test/automatic/svm/svm-optimized.py | Apache-2.0 |
def objective(self, wb, X, y):
"""Compute the objective function for the SVM.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
obj (float): value of the objective function evaluated on X and y.
"""
n_samples = X.shape[0]
w = wb[:-1]
b = wb[-1]
sum = 0
for n in range(n_samples):
sum += max(0, 1 - y[n] * (np.dot(X[n], w) + b))
return sum + self.lambda1 * LA.norm(w, 1) + self.lambda2 * (LA.norm(w, 2) ** 2) | Compute the objective function for the SVM.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
obj (float): value of the objective function evaluated on X and y. | objective | python | plasma-umass/scalene | test/automatic/svm/svm-optimized.py | https://github.com/plasma-umass/scalene/blob/master/test/automatic/svm/svm-optimized.py | Apache-2.0 |
def subgradient(self, wb, X, y):
"""Compute the subgradient of the objective function.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
subgrad (ndarray, shape = (n_features+1,)):
subgradient of the objective function with respect to
the coefficients wb=[w,b] of the linear model
"""
n_samples = X.shape[0]
n_features = X.shape[1]
w = wb[:-1]
b = wb[-1]
# Vectorized operations to replace for loops
subgrad = np.zeros(n_features + 1)
subgrad[:-1] = np.sum(-y[:, None] * X * (y * (X.dot(w) + b) < 1)[:, None], axis=0)
subgrad[:-1] += self.lambda1 * np.sign(w) + 2 * self.lambda2 * w
subgrad[-1] = np.sum(-y * (y * (X.dot(w) + b) < 1))
return subgrad | Compute the subgradient of the objective function.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
subgrad (ndarray, shape = (n_features+1,)):
subgradient of the objective function with respect to
the coefficients wb=[w,b] of the linear model | subgradient | python | plasma-umass/scalene | test/automatic/svm/svm-optimized.py | https://github.com/plasma-umass/scalene/blob/master/test/automatic/svm/svm-optimized.py | Apache-2.0 |
def subgradient_orig(self, wb, X, y):
"""Compute the subgradient of the objective function.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
subgrad (ndarray, shape = (n_features+1,)):
subgradient of the objective function with respect to
the coefficients wb=[w,b] of the linear model
"""
n_samples = X.shape[0]
n_features = X.shape[1]
w = wb[:-1]
b = wb[-1]
subgrad = np.zeros(n_features + 1)
for i in range(n_features):
for n in range(n_samples):
subgrad[i] += (- y[n] * X[n][i]) if y[n] * (np.dot(X[n], w) + b) < 1 else 0
subgrad[i] += self.lambda1 * (-1 if w[i] < 0 else 1) + 2 * self.lambda2 * w[i]
for n in range(n_samples):
subgrad[-1] += - y[n] if y[n] * (np.dot(X[n], w) + b) < 1 else 0
return subgrad | Compute the subgradient of the objective function.
Arguments:
wb (ndarray, shape = (n_features+1,)):
concatenation of the weight vector with the bias wb=[w,b]
X (ndarray, shape = (n_samples, n_features)):
Training input matrix where each row is a feature vector.
The data in X are passed in without a bias column!
y (ndarray, shape = (n_samples,)):
Training target. Each entry is either -1 or 1.
Returns:
subgrad (ndarray, shape = (n_features+1,)):
subgradient of the objective function with respect to
the coefficients wb=[w,b] of the linear model | subgradient_orig | python | plasma-umass/scalene | test/automatic/svm/svm-optimized.py | https://github.com/plasma-umass/scalene/blob/master/test/automatic/svm/svm-optimized.py | Apache-2.0 |
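As a sanity check, the vectorized subgradient and the retained loop version should agree on random data. The sketch below assumes the surrounding class is named SVM and takes lambda1/lambda2 in its constructor, neither of which is shown in this excerpt.

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 5))
y = rng.choice([-1, 1], size=20)
wb = rng.normal(size=6)

clf = SVM(lambda1=0.1, lambda2=0.1)      # assumed class name and constructor signature
print(np.allclose(clf.subgradient(wb, X, y), clf.subgradient_orig(wb, X, y)))
# expected: True (the two can differ only in the sign convention at w == 0)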
def split_params(data):
"""Split params between scanned and non-scanned"""
flat = traverse_util.flatten_dict(unfreeze(data))
split = {"standard": {}, "scanned_encoder": {}, "scanned_decoder": {}}
for k, v in flat.items():
if "FlaxBartEncoderLayers" in k:
split["scanned_encoder"][k] = v
elif "FlaxBartDecoderLayers" in k:
split["scanned_decoder"][k] = v
else:
split["standard"][k] = v
# remove empty keys
split = {k: v for k, v in split.items() if v}
for k, v in split.items():
split[k] = freeze(traverse_util.unflatten_dict(v))
return split | Split params between scanned and non-scanned | split_params | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
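The same flatten, filter, unflatten pattern in isolation, on a toy parameter tree (the layer names are invented); flatten_dict keys are tuples of path components, which is why the substring-style membership test on k works.

from flax import traverse_util
from flax.core.frozen_dict import freeze

params = {
    "model": {
        "FlaxBartEncoderLayers": {"kernel": 1},
        "embed_tokens": {"embedding": 2},
    }
}
flat = traverse_util.flatten_dict(params)
scanned = {k: v for k, v in flat.items() if "FlaxBartEncoderLayers" in k}
standard = {k: v for k, v in flat.items() if "FlaxBartEncoderLayers" not in k}
print(freeze(traverse_util.unflatten_dict(scanned)))
print(freeze(traverse_util.unflatten_dict(standard)))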
def trainable_params(data, embeddings_only):
"""Keep only trainable parameters"""
if not embeddings_only:
return data
data = unfreeze(data)
trainable = {
"lm_head": data["lm_head"],
"model": {
"decoder": {
layer: data["model"]["decoder"][layer]
for layer in [
"embed_positions",
"embed_tokens",
"final_ln",
"layernorm_embedding",
]
}
},
}
return freeze(trainable) | Keep only trainable parameters | trainable_params | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
def init_embeddings(model, params):
"""Reinitialize trainable embeddings"""
# Must match params in trainable_params() above
trainable_keypaths = [
"lm_head.kernel",
"model.decoder.embed_positions.embedding",
"model.decoder.embed_tokens.embedding",
"model.decoder.final_ln.bias",
"model.decoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.scale",
]
# Note: using private _missing_keys
init_keys = {tuple(k.split(".")) for k in trainable_keypaths}
model._missing_keys = init_keys
return model.init_weights(model.key, model.input_shape, params=params) | Reinitialize trainable embeddings | init_embeddings | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
def create_learning_rate_fn() -> Callable[[int], jnp.array]:
"""Create the learning rate function."""
warmup_fn = optax.linear_schedule(
init_value=0.0,
end_value=training_args.learning_rate,
transition_steps=training_args.warmup_steps + 1, # ensure not 0
)
last_boundary = training_args.warmup_steps
# offset step when resuming
if training_args.lr_offset:
warmup_fn = optax.join_schedules(
schedules=[optax.constant_schedule(0.0), warmup_fn],
boundaries=[training_args.lr_offset],
)
last_boundary += training_args.lr_offset
if training_args.lr_decay is None:
return warmup_fn
elif training_args.lr_decay == "linear":
assert (
num_train_steps is not None
), "linear decay requires knowing the dataset length"
decay_fn = optax.linear_schedule(
init_value=training_args.learning_rate,
end_value=0,
transition_steps=num_train_steps - training_args.warmup_steps,
)
elif training_args.lr_decay == "exponential":
decay_fn = optax.exponential_decay(
init_value=training_args.learning_rate,
transition_steps=training_args.lr_transition_steps,
decay_rate=training_args.lr_decay_rate,
staircase=training_args.lr_staircase,
)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, decay_fn],
boundaries=[last_boundary],
)
return schedule_fn | Create the learning rate function. | main.create_learning_rate_fn | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
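A standalone sketch of the warmup-then-linear-decay case built above, with made-up hyperparameters; it leaves out the lr_offset and exponential-decay branches handled by the full function.

import optax

def warmup_linear_decay(peak_lr=5e-5, warmup_steps=1000, total_steps=10_000):
    warmup = optax.linear_schedule(0.0, peak_lr, transition_steps=warmup_steps + 1)
    decay = optax.linear_schedule(peak_lr, 0.0, transition_steps=total_steps - warmup_steps)
    return optax.join_schedules([warmup, decay], boundaries=[warmup_steps])

schedule = warmup_linear_decay()
print(schedule(0), schedule(1_000), schedule(10_000))   # 0.0, ~peak_lr, ~0.0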
def maybe_fn(fn, val, zeros, freq):
"""Call fn only if it is a logging step"""
return jax.lax.cond(
state.step % freq == 0,
fn,
lambda _: zeros,
val,
) | Call fn only if it is a logging step | main.main.train_step.maybe_fn | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
def train_step(state, batch, train_time):
# get a minibatch (one gradient accumulation slice)
def get_minibatch(batch, grad_idx):
return jax.tree_util.tree_map(
lambda x: jax.lax.dynamic_index_in_dim(x, grad_idx, keepdims=False),
batch,
)
def compute_loss(params, minibatch, dropout_rng):
# minibatch has dim (batch_size, ...)
minibatch, labels = minibatch.pop("labels")
logits = state.apply_fn(
**minibatch, params=params, dropout_rng=dropout_rng, train=True
)[0]
return loss_fn(logits, labels)
grad_fn = jax.value_and_grad(compute_loss)
def loss_and_grad(grad_idx, dropout_rng):
# minibatch at grad_idx for gradient accumulation (None otherwise)
minibatch = (
get_minibatch(batch, grad_idx) if grad_idx is not None else batch
)
# ensure it is sharded properly
minibatch = with_sharding_constraint(minibatch, batch_spec)
# only 1 single rng per grad step, let us handle larger batch size (not sure why)
dropout_rng, _ = jax.random.split(dropout_rng)
if use_vmap_trick:
# "vmap trick", calculate loss and grads independently per dp_device
loss, grads = jax.vmap(
grad_fn, in_axes=(None, 0, None), out_axes=(0, 0)
)(state.params, minibatch, dropout_rng)
# ensure they are sharded correctly
loss = with_sharding_constraint(loss, batch_spec)
grads = with_sharding_constraint(grads, grad_param_spec)
# average across all devices
# Note: we could average per device only after gradient accumulation, right before params update
loss, grads = jax.tree_util.tree_map(
lambda x: jnp.mean(x, axis=0), (loss, grads)
)
else:
# "vmap trick" does not work in multi-hosts and requires too much hbm
loss, grads = grad_fn(state.params, minibatch, dropout_rng)
# ensure grads are sharded
grads = with_sharding_constraint(grads, param_spec)
# return loss and grads
return loss, grads, dropout_rng
if training_args.gradient_accumulation_steps == 1:
loss, grads, dropout_rng = loss_and_grad(None, state.dropout_rng)
else:
# create initial state for cumul_minibatch_step loop
init_minibatch_step = (
0.0,
with_sharding_constraint(
jax.tree_util.tree_map(jnp.zeros_like, state.params), param_spec
),
state.dropout_rng,
)
# accumulate gradients
def cumul_minibatch_step(grad_idx, cumul_loss_grad_dropout):
cumul_loss, cumul_grads, dropout_rng = cumul_loss_grad_dropout
loss, grads, dropout_rng = loss_and_grad(grad_idx, dropout_rng)
cumul_loss, cumul_grads = jax.tree_util.tree_map(
jnp.add, (cumul_loss, cumul_grads), (loss, grads)
)
cumul_grads = with_sharding_constraint(cumul_grads, param_spec)
return cumul_loss, cumul_grads, dropout_rng
# loop over gradients
loss, grads, dropout_rng = jax.lax.fori_loop(
0,
training_args.gradient_accumulation_steps,
cumul_minibatch_step,
init_minibatch_step,
)
grads = with_sharding_constraint(grads, param_spec)
# sum -> mean
loss, grads = jax.tree_util.tree_map(
lambda x: x / training_args.gradient_accumulation_steps, (loss, grads)
)
grads = with_sharding_constraint(grads, param_spec)
# update state
state = state.apply_gradients(
grads=grads,
dropout_rng=dropout_rng,
train_time=train_time,
train_samples=state.train_samples + batch_size_per_step,
)
metrics = {
"loss": loss,
"learning_rate": learning_rate_fn(state.step),
}
def maybe_fn(fn, val, zeros, freq):
"""Call fn only if it is a logging step"""
return jax.lax.cond(
state.step % freq == 0,
fn,
lambda _: zeros,
val,
)
# log additional metrics
params = trainable_params(state.params, training_args.embeddings_only)
grads = trainable_params(grads, training_args.embeddings_only)
if training_args.log_norm_steps:
zeros_norm = jax.tree_util.tree_map(lambda _: jnp.float32(0), params)
def norm(val):
return jax.tree_util.tree_map(lambda x: jnp.linalg.norm(x), val)
gradients_norm = maybe_fn(
norm, grads, zeros_norm, training_args.log_norm_steps
)
params_norm = maybe_fn(
norm, params, zeros_norm, training_args.log_norm_steps
)
metrics.update(
{
"gradients_norm": gradients_norm,
"params_norm": params_norm,
}
)
if training_args.log_histogram_steps:
zeros_hist = jax.tree_util.tree_map(
lambda _: jnp.histogram(jnp.zeros(1), density=True), params
)
def histogram(val):
return jax.tree_util.tree_map(
lambda x: jnp.histogram(x, density=True), val
)
gradients_hist = maybe_fn(
histogram, grads, zeros_hist, training_args.log_histogram_steps
)
params_hist = maybe_fn(
histogram, params, zeros_hist, training_args.log_histogram_steps
)
metrics.update(
{
"params_hist": params_hist,
"gradients_hist": gradients_hist,
}
)
return state, metrics | Call fn only if it is a logging step | main.train_step | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
def update_state_metrics(self, state):
"""Update internal state metrics (logged at each call to be used as x-axis)"""
self.state_dict = {
f'train/{k.split("_")[-1]}': state[k]
for k in ["step", "epoch", "train_time", "train_samples"]
}
# timing metrics
new_step = int(state["step"])
new_time = time.perf_counter()
if new_step > self.step:
# remove time for eval & save
delta_time = new_time - self.time - self.offset_time
self.offset_time = 0
time_per_step = delta_time / (new_step - self.step)
self.step = new_step
self.time = new_time
self.log_time("train_per_step", time_per_step, offset=False)
self.log_time("train_per_log", delta_time, offset=False) | Update internal state metrics (logged at each call to be used as x-axis) | main.update_state_metrics | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
def main():
# See all possible arguments by passing the --help flag to this script.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# check arguments
if training_args.mp_devices > jax.local_device_count():
assert (
data_args.seed_dataset is not None
), "Seed dataset must be provided when model is split over multiple hosts"
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Load dataset
dataset = Dataset(
**asdict(data_args),
do_train=training_args.do_train,
do_eval=training_args.do_eval,
)
logger.info(f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax.device_count()}")
# Set up wandb run
if jax.process_index() == 0:
wandb.init(
entity=training_args.wandb_entity,
project=training_args.wandb_project,
job_type=training_args.wandb_job_type,
config=parser.parse_args(),
)
# Set up our new model config
config_args = {
k: getattr(model_args, k)
for k in ["dropout", "activation_dropout", "attention_dropout"]
if getattr(model_args, k) is not None
}
config_args["gradient_checkpointing"] = training_args.gradient_checkpointing
if model_args.config_name:
config = DalleBartConfig.from_pretrained(model_args.config_name)
else:
config = None
# Load or create new model
if model_args.model_name_or_path:
model, params = DalleBart.from_pretrained(
model_args.model_name_or_path,
config=config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
_do_init=False,
)
if training_args.embeddings_only and training_args.init_embeddings:
params = init_embeddings(model, params)
else:
model = DalleBart(
config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
_do_init=False,
)
params = None
for k, v in config_args.items():
setattr(model.config, k, v)
params_shape = model.params_shape_tree
# get model metadata
model_metadata = model_args.get_metadata()
# get PartitionSpec for model params (required to be a dict)
param_spec = set_partitions(params_shape, model.config.use_scan)
params_shape = freeze(params_shape)
if params is not None:
params = freeze(params)
# Load tokenizer
tokenizer = DalleBartTokenizer.from_pretrained(
model_args.tokenizer_name, use_fast=True
)
# Preprocessing the datasets.
# We need to normalize and tokenize inputs and targets.
dataset.preprocess(tokenizer=tokenizer, config=model.config)
# Initialize our training
dropout_rng = jax.random.PRNGKey(training_args.seed_model)
# Store some constant
num_epochs = training_args.num_train_epochs
# batch size
batch_size_per_node_per_grad_step = (
training_args.per_device_train_batch_size
* jax.local_device_count()
// training_args.mp_devices
)
batch_size_per_node = (
batch_size_per_node_per_grad_step * training_args.gradient_accumulation_steps
)
batch_size_per_step = batch_size_per_node * jax.process_count()
eval_batch_size_per_node = (
training_args.per_device_eval_batch_size
* jax.local_device_count()
// training_args.mp_devices
)
eval_batch_size_per_step = eval_batch_size_per_node * jax.process_count()
len_train_dataset, len_eval_dataset = dataset.length
steps_per_epoch = (
len_train_dataset // batch_size_per_node
if len_train_dataset is not None
else None
)
num_train_steps = (
steps_per_epoch * num_epochs if steps_per_epoch is not None else None
)
num_params = model.num_params(params_shape)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len_train_dataset}")
logger.info(f" Num Epochs = {num_epochs}")
logger.info(
f" Batch size per dp device = {training_args.per_device_train_batch_size}"
)
logger.info(f" Number of devices = {jax.device_count()}")
logger.info(
f" Gradient accumulation steps = {training_args.gradient_accumulation_steps}"
)
logger.info(f" Batch size per update = {batch_size_per_step}")
logger.info(f" Model parameters = {num_params:,}")
# set up wandb run
if jax.process_index() == 0:
# set default x-axis as 'train/step'
wandb.define_metric("*", step_metric="train/step")
# add interesting config parameters
wandb.config.update(
{
"len_train_dataset": len_train_dataset,
"len_eval_dataset": len_eval_dataset,
"batch_size_per_step": batch_size_per_step,
"num_params": num_params,
"model_config": model.config.to_dict(),
"num_devices": jax.device_count(),
"versions": {
"jax": jax.__version__,
"jaxlib": jaxlib.__version__,
"flax": flax.__version__,
"transformers": transformers.__version__,
"datasets": datasets.__version__,
"wandb": wandb.__version__,
"dalle_mini": dalle_mini.__version__,
},
}
)
# Create learning rate schedule
def create_learning_rate_fn() -> Callable[[int], jnp.array]:
"""Create the learning rate function."""
warmup_fn = optax.linear_schedule(
init_value=0.0,
end_value=training_args.learning_rate,
transition_steps=training_args.warmup_steps + 1, # ensure not 0
)
last_boundary = training_args.warmup_steps
# offset step when resuming
if training_args.lr_offset:
warmup_fn = optax.join_schedules(
schedules=[optax.constant_schedule(0.0), warmup_fn],
boundaries=[training_args.lr_offset],
)
last_boundary += training_args.lr_offset
if training_args.lr_decay is None:
return warmup_fn
elif training_args.lr_decay == "linear":
assert (
num_train_steps is not None
), "linear decay requires knowing the dataset length"
decay_fn = optax.linear_schedule(
init_value=training_args.learning_rate,
end_value=0,
transition_steps=num_train_steps - training_args.warmup_steps,
)
elif training_args.lr_decay == "exponential":
decay_fn = optax.exponential_decay(
init_value=training_args.learning_rate,
transition_steps=training_args.lr_transition_steps,
decay_rate=training_args.lr_decay_rate,
staircase=training_args.lr_staircase,
)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, decay_fn],
boundaries=[last_boundary],
)
return schedule_fn
learning_rate_fn = create_learning_rate_fn()
# create optimizer
trainable_params_shape = trainable_params(
params_shape, training_args.embeddings_only
)
if training_args.optim == "distributed_shampoo":
# parameters from https://github.com/tensorflow/lingvo/blob/03ee9d7cd50764b0424c7c863733c91fc0b053ec/lingvo/jax/optimizers.py#L729
graft_type = {
"sgd": GraftingType.SGD,
"adagrad": GraftingType.ADAGRAD,
"rmsprop": GraftingType.RMSPROP,
"rmsprop_normalized": GraftingType.RMSPROP_NORMALIZED,
"sqrt_n": GraftingType.SQRT_N,
"adagrad_normalized": GraftingType.ADAGRAD_NORMALIZED,
}[training_args.graft_type]
statistics_partition_spec = (
PartitionSpec(None, training_args.shard_shampoo_across, None)
if training_args.shard_shampoo_across != "2d"
else PartitionSpec(None, "dp", "mp")
)
opt = distributed_shampoo(
learning_rate_fn,
block_size=training_args.block_size,
beta1=training_args.beta1,
beta2=training_args.beta2,
diagonal_epsilon=1e-10,
matrix_epsilon=1e-6,
weight_decay=training_args.weight_decay,
start_preconditioning_step=max(
training_args.preconditioning_compute_steps + 1, 101
),
preconditioning_compute_steps=training_args.preconditioning_compute_steps,
statistics_compute_steps=1,
best_effort_shape_interpretation=True,
graft_type=graft_type,
nesterov=training_args.nesterov,
exponent_override=0,
statistics_partition_spec=statistics_partition_spec,
preconditioner_partition_spec=PartitionSpec(
training_args.shard_shampoo_across, None, None
)
if training_args.shard_shampoo_across != "2d"
else PartitionSpec(
"mp" if training_args.mp_devices > training_args.dp_devices else "dp",
None,
None,
),
num_devices_for_pjit=training_args.dp_devices,
shard_optimizer_states=True,
inverse_failure_threshold=0.1,
moving_average_for_momentum=True,
skip_preconditioning_dim_size_gt=training_args.skip_preconditioning_dim_size_gt,
clip_by_scaled_gradient_norm=None,
precision=jax.lax.Precision.HIGHEST,
best_effort_memory_usage_reduction=training_args.optim_quantized,
)
# get the real optimizer and helper functions
update_fn = opt.update
optimizer = {}
opt_fn = {}
for k, p in split_params(trainable_params_shape).items():
if "scanned" in k:
p = jax.eval_shape(
lambda x: jax.tree_util.tree_map(lambda y: y[0], x), p
)
optimizer[k] = opt.init(p)
opt_fn[k] = NamedTuple("opt_fn", pspec_fn=Any, shape_and_dtype_fn=Any)(
optimizer[k].pspec_fn, optimizer[k].shape_and_dtype_fn
)
optimizer[k] = optax.GradientTransformation(optimizer[k].init_fn, update_fn)
elif training_args.optim == "adam":
optimizer = optax.adamw(
learning_rate=learning_rate_fn,
b1=training_args.beta1,
b2=training_args.beta2,
eps=training_args.adam_epsilon,
weight_decay=training_args.weight_decay,
)
optimizer = {k: optimizer for k in split_params(trainable_params_shape)}
elif training_args.optim == "adafactor":
# We use the default parameters here to initialize adafactor,
# For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74
optimizer = optax.adafactor(
learning_rate=learning_rate_fn,
clipping_threshold=training_args.max_grad_norm,
weight_decay_rate=training_args.weight_decay,
)
optimizer = {k: optimizer for k in split_params(trainable_params_shape)}
# get PartitionSpec for optimizer state
def get_opt_state_spec_and_shape():
# get opt_state shape without actual init
opt_state_shape = {}
for k, p in split_params(trainable_params_shape).items():
if "scanned" not in k:
opt_state_shape[k] = jax.eval_shape(optimizer[k].init, p)
else:
opt_state_shape[k] = jax.eval_shape(jax.vmap(optimizer[k].init), p)
if training_args.optim == "adafactor":
# factorized state must be replicated (rank different than params)
opt_state_spec = {k: None for k in split_params(trainable_params_shape)}
elif training_args.optim in ["adam", "distributed_shampoo"]:
def _opt_state_spec_per_leaf(x, spec):
if isinstance(x, FrozenDict):
# variables with same structure as params
return spec
else:
# other variables such as count
return None
split_spec = split_params(set_partitions(trainable_params_shape, False))
opt_state_spec = {}
for k, p in split_params(trainable_params_shape).items():
if "scanned" in k:
p = jax.eval_shape(
lambda x: jax.tree_util.tree_map(lambda y: y[0], x), p
)
if training_args.optim == "adam":
opt_state_spec[k] = jax.tree_util.tree_map(
partial(_opt_state_spec_per_leaf, spec=split_spec[k]),
opt_state_shape[k],
# return None spec for empty elements
is_leaf=lambda x: isinstance(x, (FrozenDict, optax.EmptyState)),
)
elif training_args.optim == "distributed_shampoo":
opt_state_spec[k] = opt_fn[k].pspec_fn(
p,
split_spec[k],
statistics_partition_spec,
)
# add dimension for scanned params
if "scanned" in k:
opt_state_spec[k] = jax.tree_util.tree_map(
lambda x: PartitionSpec(*(None,) + x)
if x is not None
else None,
opt_state_spec[k],
is_leaf=lambda x: isinstance(x, PartitionSpec),
)
else:
raise NotImplementedError
return freeze(opt_state_spec), freeze(opt_state_shape)
opt_state_spec, opt_state_shape = get_opt_state_spec_and_shape()
# create a mesh
mesh_shape = (training_args.dp_devices, training_args.mp_devices)
devices = np.asarray(jax.devices()).reshape(*mesh_shape)
mesh = maps.Mesh(devices, ("dp", "mp"))
logger.info(f" Mesh shape: {mesh_shape}")
# define TrainState
class TrainState(struct.PyTreeNode):
step: int
params: core.FrozenDict[str, Any]
opt_state: optax.OptState
apply_fn: Callable = struct.field(pytree_node=False)
tx: optax.GradientTransformation = struct.field(pytree_node=False)
dropout_rng: jnp.ndarray = None
epoch: int = 0
train_time: float = 0.0 # total time the model trained
train_samples: int = 0 # number of samples seen
def apply_gradients(self, *, grads, **kwargs):
grads = split_params(trainable_params(grads, training_args.embeddings_only))
params = split_params(
trainable_params(self.params, training_args.embeddings_only)
)
opt_state = {}
# we loop over keys: "standard", "scanned_encoder", "scanned_decoder"
for k, param in params.items():
update_fn = self.tx[k].update
if "scanned" in k:
update_fn = jax.vmap(update_fn, in_axes=(0, 0, 0), out_axes=(0, 0))
updates, new_opt_state = update_fn(grads[k], self.opt_state[k], param)
params[k] = optax.apply_updates(param, updates)
opt_state[k] = new_opt_state
params = unsplit_params(params)
# merge with non-trainable params
params, new_params = traverse_util.flatten_dict(
unfreeze(self.params)
), traverse_util.flatten_dict(unfreeze(params))
params.update(new_params)
params = freeze(traverse_util.unflatten_dict(params))
return self.replace(
step=self.step + 1,
params=params,
opt_state=freeze(opt_state),
**kwargs,
)
@classmethod
def create(cls, *, apply_fn, params, tx, **kwargs):
opt_state = {}
for k, p in split_params(
trainable_params(params, training_args.embeddings_only)
).items():
init_fn = tx[k].init
if "scanned" in k:
init_fn = jax.vmap(init_fn)
opt_state[k] = init_fn(p)
return cls(
step=0,
apply_fn=apply_fn,
params=params,
tx=tx,
opt_state=freeze(opt_state),
**kwargs,
)
# define state spec
state_spec = TrainState(
params=param_spec,
opt_state=opt_state_spec,
dropout_rng=None,
step=None,
epoch=None,
train_time=None,
train_samples=None,
apply_fn=model.__call__,
tx=optimizer,
)
# init params if not available yet
def maybe_init_params(params):
if params is not None:
# model params are correctly loaded
return params
else:
# params have not been initialized yet
return model.init_weights(model.key, model.input_shape)
with mesh:
logger.info(" Creating state")
# restore metadata
attr_state = {}
keys = ["train_time", "train_samples"]
if model_args.restore_state:
keys += ["step", "epoch"]
attr_state = {k: v for k, v in model_metadata.items() if k in keys}
if not model_args.restore_state:
def init_state(params):
return TrainState.create(
apply_fn=model.__call__,
tx=optimizer,
params=maybe_init_params(params),
dropout_rng=dropout_rng,
**attr_state,
)
state = pjit(
init_state,
in_axis_resources=(param_spec,)
if model_args.model_name_or_path
else None,
out_axis_resources=state_spec,
donate_argnums=(0,),
)(params)
else:
# load opt_state
opt_state = from_bytes(opt_state_shape, model_args.get_opt_state())
def restore_state(params, opt_state):
return TrainState(
apply_fn=model.__call__,
tx=optimizer,
params=params,
opt_state=opt_state,
dropout_rng=dropout_rng,
**attr_state,
)
state = pjit(
restore_state,
in_axis_resources=(
param_spec,
opt_state_spec,
),
out_axis_resources=state_spec,
donate_argnums=(0, 1),
)(params, opt_state)
# remove opt_state from CPU
del opt_state
# free CPU memory
del params, opt_state_spec, opt_state_shape
# define batch specs
batch_spec = PartitionSpec("dp")
grad_batch_spec = PartitionSpec(None, "dp")
# define loss
def loss_fn(logits, labels):
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1]))
loss = loss.mean()
return loss
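    # Hedged sanity check for loss_fn (hypothetical values, not required by the
    # training loop): with vocabulary size 4, logits = jnp.array([0., 0., 10., 0.])
    # and label 2, optax.softmax_cross_entropy(logits, onehot(2, 4)) is ~1.4e-4,
    # i.e. the loss is near zero when all probability mass sits on the true token.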
# "vmap trick" avoids a crash when mp_devices > 1 (not sure why it happens)
# lead to better perf: see https://wandb.ai/dalle-mini/dalle-mini/reports/JAX-pmap-vs-pjit--VmlldzoxNDg1ODA2
use_vmap_trick = training_args.use_vmap_trick
# make grad_param_spec for vmap
if use_vmap_trick:
grad_param_spec = jax.tree_util.tree_map(
lambda x: PartitionSpec(*("dp",) + (x if x is not None else (None,))),
param_spec,
)
# Define gradient update step fn
def train_step(state, batch, train_time):
# get a minibatch (one gradient accumulation slice)
def get_minibatch(batch, grad_idx):
return jax.tree_util.tree_map(
lambda x: jax.lax.dynamic_index_in_dim(x, grad_idx, keepdims=False),
batch,
)
def compute_loss(params, minibatch, dropout_rng):
# minibatch has dim (batch_size, ...)
minibatch, labels = minibatch.pop("labels")
logits = state.apply_fn(
**minibatch, params=params, dropout_rng=dropout_rng, train=True
)[0]
return loss_fn(logits, labels)
grad_fn = jax.value_and_grad(compute_loss)
def loss_and_grad(grad_idx, dropout_rng):
# minibatch at grad_idx for gradient accumulation (None otherwise)
minibatch = (
get_minibatch(batch, grad_idx) if grad_idx is not None else batch
)
# ensure it is sharded properly
minibatch = with_sharding_constraint(minibatch, batch_spec)
            # use a single rng per grad step; this lets us handle a larger batch size (not sure why)
dropout_rng, _ = jax.random.split(dropout_rng)
if use_vmap_trick:
# "vmap trick", calculate loss and grads independently per dp_device
loss, grads = jax.vmap(
grad_fn, in_axes=(None, 0, None), out_axes=(0, 0)
)(state.params, minibatch, dropout_rng)
# ensure they are sharded correctly
loss = with_sharding_constraint(loss, batch_spec)
grads = with_sharding_constraint(grads, grad_param_spec)
# average across all devices
# Note: we could average per device only after gradient accumulation, right before params update
loss, grads = jax.tree_util.tree_map(
lambda x: jnp.mean(x, axis=0), (loss, grads)
)
else:
# "vmap trick" does not work in multi-hosts and requires too much hbm
loss, grads = grad_fn(state.params, minibatch, dropout_rng)
# ensure grads are sharded
grads = with_sharding_constraint(grads, param_spec)
# return loss and grads
return loss, grads, dropout_rng
if training_args.gradient_accumulation_steps == 1:
loss, grads, dropout_rng = loss_and_grad(None, state.dropout_rng)
else:
# create initial state for cumul_minibatch_step loop
init_minibatch_step = (
0.0,
with_sharding_constraint(
jax.tree_util.tree_map(jnp.zeros_like, state.params), param_spec
),
state.dropout_rng,
)
# accumulate gradients
def cumul_minibatch_step(grad_idx, cumul_loss_grad_dropout):
cumul_loss, cumul_grads, dropout_rng = cumul_loss_grad_dropout
loss, grads, dropout_rng = loss_and_grad(grad_idx, dropout_rng)
cumul_loss, cumul_grads = jax.tree_util.tree_map(
jnp.add, (cumul_loss, cumul_grads), (loss, grads)
)
cumul_grads = with_sharding_constraint(cumul_grads, param_spec)
return cumul_loss, cumul_grads, dropout_rng
# loop over gradients
loss, grads, dropout_rng = jax.lax.fori_loop(
0,
training_args.gradient_accumulation_steps,
cumul_minibatch_step,
init_minibatch_step,
)
grads = with_sharding_constraint(grads, param_spec)
# sum -> mean
loss, grads = jax.tree_util.tree_map(
lambda x: x / training_args.gradient_accumulation_steps, (loss, grads)
)
grads = with_sharding_constraint(grads, param_spec)
# update state
state = state.apply_gradients(
grads=grads,
dropout_rng=dropout_rng,
train_time=train_time,
train_samples=state.train_samples + batch_size_per_step,
)
metrics = {
"loss": loss,
"learning_rate": learning_rate_fn(state.step),
}
def maybe_fn(fn, val, zeros, freq):
"""Call fn only if it is a logging step"""
return jax.lax.cond(
state.step % freq == 0,
fn,
lambda _: zeros,
val,
)
# log additional metrics
params = trainable_params(state.params, training_args.embeddings_only)
grads = trainable_params(grads, training_args.embeddings_only)
if training_args.log_norm_steps:
zeros_norm = jax.tree_util.tree_map(lambda _: jnp.float32(0), params)
def norm(val):
return jax.tree_util.tree_map(lambda x: jnp.linalg.norm(x), val)
gradients_norm = maybe_fn(
norm, grads, zeros_norm, training_args.log_norm_steps
)
params_norm = maybe_fn(
norm, params, zeros_norm, training_args.log_norm_steps
)
metrics.update(
{
"gradients_norm": gradients_norm,
"params_norm": params_norm,
}
)
if training_args.log_histogram_steps:
zeros_hist = jax.tree_util.tree_map(
lambda _: jnp.histogram(jnp.zeros(1), density=True), params
)
def histogram(val):
return jax.tree_util.tree_map(
lambda x: jnp.histogram(x, density=True), val
)
gradients_hist = maybe_fn(
histogram, grads, zeros_hist, training_args.log_histogram_steps
)
params_hist = maybe_fn(
histogram, params, zeros_hist, training_args.log_histogram_steps
)
metrics.update(
{
"params_hist": params_hist,
"gradients_hist": gradients_hist,
}
)
return state, metrics
# Define eval fn
eval_model = (
model
if model_args.dtype == "float32"
else DalleBart(
model.config,
seed=training_args.seed_model,
dtype=jnp.float32,
_do_init=False,
)
)
def eval_step(state, batch):
def compute_eval_loss(batch):
batch, labels = batch.pop("labels")
logits = eval_model(**batch, params=state.params, train=False)[0]
return loss_fn(logits, labels)
if use_vmap_trick:
loss = jax.vmap(compute_eval_loss)(batch)
# ensure they are sharded correctly
loss = with_sharding_constraint(loss, batch_spec)
# average across all devices
loss = jnp.mean(loss)
else:
loss = compute_eval_loss(batch)
return loss
# Create parallel version of the train and eval step
p_train_step = pjit(
train_step,
in_axis_resources=(
state_spec,
grad_batch_spec
if training_args.gradient_accumulation_steps > 1
else batch_spec,
None,
),
out_axis_resources=(state_spec, None),
donate_argnums=(0,),
)
p_eval_step = pjit(
eval_step,
in_axis_resources=(state_spec, batch_spec),
out_axis_resources=None,
)
# define metrics logger
class MetricsLogger:
def __init__(self, step):
# keep state
self.state_dict = {}
# estimate speed
self.step = step
self.time = time.perf_counter()
self.offset_time = 0.0
def update_state_metrics(self, state):
"""Update internal state metrics (logged at each call to be used as x-axis)"""
self.state_dict = {
f'train/{k.split("_")[-1]}': state[k]
for k in ["step", "epoch", "train_time", "train_samples"]
}
# timing metrics
new_step = int(state["step"])
new_time = time.perf_counter()
if new_step > self.step:
# remove time for eval & save
delta_time = new_time - self.time - self.offset_time
self.offset_time = 0
time_per_step = delta_time / (new_step - self.step)
self.step = new_step
self.time = new_time
self.log_time("train_per_step", time_per_step, offset=False)
self.log_time("train_per_log", delta_time, offset=False)
def log_time(self, key, duration, offset=True):
if jax.process_index() == 0:
wandb.log({f"time/{key}": duration, **self.state_dict})
if offset:
self.offset_time += duration
def log(self, metrics, prefix=None):
if jax.process_index() == 0:
log_metrics = {}
for k, v in metrics.items():
if "_norm" in k:
if self.step % training_args.log_norm_steps == 0:
log_metrics[f"{k}/"] = unfreeze(v)
elif "_hist" in k:
if self.step % training_args.log_histogram_steps == 0:
v = jax.tree_util.tree_map(
lambda x: jax.device_get(x), unfreeze(v)
)
v = jax.tree_util.tree_map(
lambda x: wandb.Histogram(np_histogram=x),
v,
is_leaf=lambda x: isinstance(x, tuple),
)
log_metrics[f"{k}/"] = v
else:
if prefix is not None:
k = f"{prefix}/{k}"
log_metrics[k] = v
wandb.log({**log_metrics, **self.state_dict})
# keep local copy of state
local_state = {
k: jax.device_get(getattr(state, k)).item()
for k in ["step", "epoch", "train_time", "train_samples"]
}
# init variables
start_time = time.perf_counter() - local_state["train_time"]
train_metrics = None
evaluation_ran = False
save_model_ran = False
metrics_logger = MetricsLogger(local_state["step"])
epochs = tqdm(
range(local_state["epoch"], num_epochs),
desc=f"Epoch ... (1/{num_epochs})",
position=0,
disable=jax.process_index() > 0,
)
def run_evaluation():
# ======================== Evaluating ==============================
if training_args.do_eval:
start_eval_time = time.perf_counter()
# get validation datasets
val_datasets = list(
dataset.other_eval_datasets.keys()
if hasattr(dataset, "other_eval_datasets")
else []
)
val_datasets += ["eval"]
for val_dataset in val_datasets:
eval_loader = dataset.dataloader(
val_dataset,
eval_batch_size_per_step
* max(1, training_args.mp_devices // jax.local_device_count()),
)
eval_steps = (
len_eval_dataset // eval_batch_size_per_step
if len_eval_dataset is not None
else None
)
eval_loss = []
for batch in tqdm(
eval_loader,
desc="Evaluating...",
position=2,
leave=False,
total=eval_steps,
disable=jax.process_index() > 0,
):
# need to keep only eval_batch_size_per_node items relevant to the node
batch = jax.tree_util.tree_map(
lambda x: x.reshape(
(jax.process_count(), eval_batch_size_per_node)
+ x.shape[1:]
),
batch,
)
batch = jax.tree_util.tree_map(
lambda x: x[jax.process_index()], batch
)
# add dp dimension when using "vmap trick"
if use_vmap_trick:
bs_shape = (
jax.local_device_count() // training_args.mp_devices,
training_args.per_device_eval_batch_size,
)
batch = jax.tree_util.tree_map(
lambda x: x.reshape(bs_shape + x.shape[1:]), batch
)
# freeze batch to pass safely to jax transforms
batch = freeze(batch)
# accumulate losses async
eval_loss.append(p_eval_step(state, batch))
# get the mean of the loss
eval_loss = jnp.stack(eval_loss)
eval_loss = jnp.mean(eval_loss)
eval_metrics = {"loss": eval_loss}
# log metrics
metrics_logger.log(eval_metrics, prefix=val_dataset)
# Print metrics and update progress bar
desc = f"Epoch... ({epoch + 1}/{num_epochs} | {val_dataset} Loss: {eval_metrics['loss']})"
epochs.write(desc)
epochs.desc = desc
# log time
metrics_logger.log_time("eval", time.perf_counter() - start_eval_time)
return eval_metrics
def run_save_model(state, eval_metrics=None):
if jax.process_index() == 0:
start_save_time = time.perf_counter()
output_dir = training_args.output_dir
use_bucket = output_dir.startswith("gs://")
if use_bucket:
bucket_path = Path(output_dir[5:]) / wandb.run.id / f"step_{state.step}"
bucket, dir_path = str(bucket_path).split("/", 1)
tmp_dir = tempfile.TemporaryDirectory()
output_dir = tmp_dir.name
# save model
params = jax.device_get(state.params)
model.save_pretrained(
output_dir,
params=params,
)
# save tokenizer
tokenizer.save_pretrained(output_dir)
# copy to bucket
if use_bucket:
client = storage.Client()
bucket = client.bucket(bucket)
for filename in Path(output_dir).glob("*"):
blob_name = str(Path(dir_path) / "model" / filename.name)
blob = bucket.blob(blob_name)
blob.upload_from_filename(str(filename))
tmp_dir.cleanup()
# save state
opt_state = jax.device_get(state.opt_state)
if use_bucket:
blob_name = str(Path(dir_path) / "state" / "opt_state.msgpack")
blob = bucket.blob(blob_name)
blob.upload_from_file(io.BytesIO(to_bytes(opt_state)))
else:
with (Path(output_dir) / "opt_state.msgpack").open("wb") as f:
f.write(to_bytes(opt_state))
# save to W&B
if training_args.log_model:
# save some space
c = wandb.wandb_sdk.wandb_artifacts.get_artifacts_cache()
c.cleanup(wandb.util.from_human_size("20GB"))
metadata = {
k: jax.device_get(getattr(state, k)).item()
for k in ["step", "epoch", "train_time", "train_samples"]
}
metadata["num_params"] = num_params
if eval_metrics is not None:
metadata["eval"] = eval_metrics
# create model artifact
if use_bucket:
metadata["bucket_path"] = f"gs://{bucket_path}/model"
artifact = wandb.Artifact(
name=f"model-{wandb.run.id}",
type="DalleBart_model",
metadata=metadata,
)
if use_bucket:
artifact.add_reference(metadata["bucket_path"])
else:
for filename in [
"config.json",
"flax_model.msgpack",
"merges.txt",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.json",
]:
artifact.add_file(
f"{Path(training_args.output_dir) / filename}"
)
wandb.run.log_artifact(artifact)
# create state artifact
if use_bucket:
metadata["bucket_path"] = f"gs://{bucket_path}/state"
artifact_state = wandb.Artifact(
name=f"state-{wandb.run.id}",
type="DalleBart_state",
metadata=metadata,
)
if use_bucket:
artifact_state.add_reference(metadata["bucket_path"])
else:
artifact_state.add_file(
f"{Path(training_args.output_dir) / 'opt_state.msgpack'}"
)
wandb.run.log_artifact(artifact_state)
metrics_logger.log_time("save_model", time.perf_counter() - start_save_time)
logger.info(" Ready to start training")
with mesh:
for epoch in epochs:
state = state.replace(epoch=epoch)
local_state["epoch"] = epoch
# ======================== Training ================================
metrics_logger.update_state_metrics(local_state)
metrics_logger.log({})
if training_args.do_train:
# load data - may be replicated on multiple nodes
node_groups = max(
1, training_args.mp_devices // jax.local_device_count()
)
loader_bs = batch_size_per_node * node_groups
train_loader = dataset.dataloader(
"train",
loader_bs,
epoch,
)
# train
for batch in tqdm(
train_loader,
desc="Training...",
position=1,
leave=False,
total=steps_per_epoch,
disable=jax.process_index() > 0,
):
# calculate delta time (we have a lag of one step but it's ok)
train_time = time.perf_counter() - start_time
# reset control variables
evaluation_ran = False
save_model_ran = False
# set correct shape to batch
# - add grad_step dim if gradient_accumulation_steps > 1
bs_shape = (
(batch_size_per_node_per_grad_step * node_groups,)
if not use_vmap_trick
else (
jax.local_device_count()
* node_groups
// training_args.mp_devices, # local dp devices
training_args.per_device_train_batch_size,
)
)
if training_args.gradient_accumulation_steps > 1:
# reshape data into (gradient_accumulation_steps, batch_per_node, ...)
# to avoid any data redistribution when sharding
bs_shape = (
training_args.gradient_accumulation_steps,
) + bs_shape
# reshape batch
batch = jax.tree_util.tree_map(
lambda x: x.reshape(bs_shape + x.shape[1:]),
batch,
)
# freeze batch to pass safely to jax transforms
batch = freeze(batch)
# train step
state, train_metrics = p_train_step(state, batch, train_time)
local_state["step"] += 1
local_state["train_time"] = train_time
local_state["train_samples"] += batch_size_per_step
if (
local_state["step"] % training_args.logging_steps == 0
and jax.process_index() == 0
):
metrics_logger.update_state_metrics(local_state)
metrics_logger.log(train_metrics, prefix="train")
eval_metrics = None
if local_state["step"] % training_args.eval_steps == 0:
eval_metrics = run_evaluation()
evaluation_ran = True
if local_state["step"] % training_args.save_steps == 0:
run_save_model(state, eval_metrics)
save_model_ran = True
# log final train metrics
if train_metrics is not None:
metrics_logger.update_state_metrics(local_state)
metrics_logger.log(train_metrics, prefix="train")
epochs.write(
f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metrics['loss']}, Learning Rate: {train_metrics['learning_rate']})"
)
# Final evaluation at the end of each epoch
if not evaluation_ran:
eval_metrics = run_evaluation()
# save checkpoint after each epoch
if not save_model_ran:
run_save_model(state, eval_metrics) | Create the learning rate function. | main | python | borisdayma/dalle-mini | tools/train/train.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/train.py | Apache-2.0 |
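For reference, a minimal standalone sketch of the warmup-plus-linear-decay schedule assembled above with optax.join_schedules; the hyperparameter values below are made up for illustration and are not taken from the training configuration.

import optax

# Hypothetical hyperparameters, chosen only for illustration.
learning_rate, warmup_steps, num_train_steps = 1e-4, 100, 1_000
warmup_fn = optax.linear_schedule(
    init_value=0.0, end_value=learning_rate, transition_steps=warmup_steps + 1
)
decay_fn = optax.linear_schedule(
    init_value=learning_rate,
    end_value=0.0,
    transition_steps=num_train_steps - warmup_steps,
)
schedule_fn = optax.join_schedules(
    schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps]
)
# Ramps up to 1e-4 over the warmup, then decays linearly back to 0.
print(schedule_fn(0), schedule_fn(warmup_steps), schedule_fn(num_train_steps))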
def _iter_body(state):
"""One step of power iteration."""
i, new_v, s, s_v, unused_run_step = state
new_v = new_v / jnp.linalg.norm(new_v)
s_v = jnp.einsum("ij,j->i", matrix, new_v, precision=precision)
s_new = jnp.einsum("i,i->", new_v, s_v, precision=precision)
return (
i + 1,
s_v,
s_new,
s_v,
jnp.greater(jnp.abs(s_new - s), error_tolerance),
) | One step of power iteration. | power_iteration._iter_body | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def power_iteration(
matrix,
num_iters=100,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
r"""Power iteration algorithm.
The power iteration algorithm takes a symmetric PSD matrix `A`, and produces
a scalar `\lambda` , which is the greatest (in absolute value) eigenvalue
of `A`, and a vector v, which is the corresponding eigenvector of `A`.
References:
[Wikipedia, 2021](https://en.wikipedia.org/wiki/Power_iteration)
Args:
matrix: the symmetric PSD matrix.
num_iters: Number of iterations.
error_tolerance: Iterative exit condition.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
Returns:
eigen vector, eigen value
"""
matrix_size = matrix.shape[-1]
def _iter_condition(state):
i, unused_v, unused_s, unused_s_v, run_step = state
return jnp.logical_and(i < num_iters, run_step)
def _iter_body(state):
"""One step of power iteration."""
i, new_v, s, s_v, unused_run_step = state
new_v = new_v / jnp.linalg.norm(new_v)
s_v = jnp.einsum("ij,j->i", matrix, new_v, precision=precision)
s_new = jnp.einsum("i,i->", new_v, s_v, precision=precision)
return (
i + 1,
s_v,
s_new,
s_v,
jnp.greater(jnp.abs(s_new - s), error_tolerance),
)
# Figure out how to use step as seed for random.
v_0 = (
np.random.RandomState(1729).uniform(-1.0, 1.0, matrix_size).astype(matrix.dtype)
)
init_state = tuple([0, v_0, jnp.zeros([], dtype=matrix.dtype), v_0, True])
_, v_out, s_out, _, _ = lax.while_loop(_iter_condition, _iter_body, init_state)
v_out = v_out / jnp.linalg.norm(v_out)
    return v_out, s_out | Power iteration algorithm.
The power iteration algorithm takes a symmetric PSD matrix `A`, and produces
a scalar `\lambda`, which is the greatest (in absolute value) eigenvalue
of `A`, and a vector v, which is the corresponding eigenvector of `A`.
References:
[Wikipedia, 2021](https://en.wikipedia.org/wiki/Power_iteration)
Args:
matrix: the symmetric PSD matrix.
num_iters: Number of iterations.
error_tolerance: Iterative exit condition.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
Returns:
eigen vector, eigen value | power_iteration | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0
def mat_power(
mat_m,
p,
precision=lax.Precision.HIGHEST,
):
"""A simple matrix power method. M^p where p can be TracedValue."""
power = jnp.eye(mat_m.shape[0], dtype=_MAT_INV_PTH_ROOT_DTYPE)
def _iter_condition(state):
i, _, _ = state
return i > 0
def _iter_body(state):
i, power, mat = state
power = jax.lax.cond(
i % 2 == 1,
lambda: jnp.matmul(mat, power, precision=precision),
lambda: power,
)
i //= 2
mat = jnp.matmul(mat, mat, precision=precision)
return i, power, mat
_, result, _ = lax.while_loop(_iter_condition, _iter_body, (p, power, mat_m))
return result | A simple matrix power method. M^p where p can be TracedValue. | mat_power | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
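A hedged check of mat_power against jnp.linalg.matrix_power, using a made-up 2x2 matrix and assuming the module-level dtype constant it relies on is defined as in the file above.

import jax.numpy as jnp

m = jnp.array([[1.0, 1.0], [0.0, 1.0]])
# Both should print a matrix with 4.0 in the upper-right corner.
print(mat_power(m, 4))
print(jnp.linalg.matrix_power(m, 4))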
def _pth_root_difference(w, alpha, beta, p):
"""Computes (w+alpha)^(-1/p)-(w+beta)^(-1/p)."""
a = w + alpha
b = w + beta
a_minus_b = alpha - beta
exp = -1 / p
def _stable_subtract(b, a_minus_b):
# Mathematically identical to the target expression, with (w+beta)^(-1/p)
# term factored out and w cancellation in the subtraction.
return (b**exp) * jnp.expm1(exp * jnp.log1p(a_minus_b / b))
return jnp.where(
# Choose the branch with the best log1p approximation.
jnp.abs(a_minus_b / b) < jnp.abs(a_minus_b / a),
-_stable_subtract(a, -a_minus_b),
_stable_subtract(b, a_minus_b),
) | Computes (w+alpha)^(-1/p)-(w+beta)^(-1/p). | _pth_root_difference | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
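A hedged numerical check of _pth_root_difference against the naive expression it stabilizes; the scalar inputs are arbitrary illustrative values.

import jax.numpy as jnp

w, alpha, beta, p = 1.0, 0.5, 0.1, 4
# The stabilized form and the direct computation should agree for benign inputs.
print(float(_pth_root_difference(w, alpha, beta, p)))
print(float((w + alpha) ** (-1 / p) - (w + beta) ** (-1 / p)))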
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
relative_matrix_epsilon=True,
lobpcg_topk_precondition=0,
lobpcg_max_iter=0,
):
"""Computes `matrix^(-1/p)`, where `p` is a positive integer.
This function uses the Coupled newton iterations algorithm for
the computation of a matrix's inverse pth root.
References:
[Functions of Matrices, Theory and Computation,
Nicholas J Higham, Pg 184, Eq 7.18](
https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
Args:
    matrix: the symmetric PSD matrix whose power is to be computed.
p: exponent, for p a positive integer.
num_iters: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
error_tolerance: Error indicator, useful for early termination.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
relative_matrix_epsilon: Whether to use relative epsilon to the max eigen
value when computing inverse-pth root.
lobpcg_topk_precondition: If nonzero, specifies the number of top
eigenvectors to subtract out before performing LOBPCG. Note this makes
relative_matrix_epsilon essentially free.
lobpcg_max_iter: Maximum iteration count for LOBPCG, defaults to
`lobpcg_topk_precondition`.
Returns:
matrix^(-1/p) and the error
"""
# If the input is not square, materialize it from the concatenated form.
if matrix.shape[0] != matrix.shape[1]:
matrix = symmetric_matrices.materialize_matrix_from_concat(matrix)
assert matrix.shape[0] == matrix.shape[1]
# We use _MAT_INV_PTH_ROOT_DTYPE for the matrix inverse pth root.
# Switch to f64 if you have hardware that supports it. Enable the jax flag
# jax_enable_x64 for this to work.
matrix_size = matrix.shape[0]
orig_dtype = matrix.dtype
matrix = matrix.astype(_MAT_INV_PTH_ROOT_DTYPE)
alpha = jnp.asarray(-1.0 / p, _MAT_INV_PTH_ROOT_DTYPE)
identity = jnp.eye(matrix_size, dtype=_MAT_INV_PTH_ROOT_DTYPE)
original_matrix = matrix
if lobpcg_topk_precondition > 0:
# TODO(vladf): reuse previous top-k as the initial search directions
pad_shape = (matrix_size - lobpcg_topk_precondition, lobpcg_topk_precondition)
search_dirs = jnp.concatenate(
(jnp.eye(lobpcg_topk_precondition), jnp.zeros(pad_shape)), axis=0
)
eigvals, eigvecs, actual_iters = linalg.lobpcg_standard(
matrix,
search_dirs,
lobpcg_topk_precondition if lobpcg_max_iter == 0 else lobpcg_max_iter,
)
del actual_iters # TODO(vladf): return diagnostics dictionary
# The minimal eigenvalue among top-k becomes the maximal one in the whole
# matrix after deflation.
max_ev = jnp.min(eigvals)
deflation = eigvals - max_ev
scaled_vecs = eigvecs * jnp.sqrt(deflation)
# Deflate out top eigenvectors to reduce matrix condition number.
matrix -= scaled_vecs.dot(scaled_vecs.T, precision=jax.lax.Precision.HIGHEST)
# Only use power iteration if lobpcg wasn't already used to derive the
# top eigenvalue.
elif relative_matrix_epsilon:
_, max_ev = power_iteration(
matrix=matrix, num_iters=100, error_tolerance=1e-6, precision=precision
)
eigvals, eigvecs = None, None # Unused but required by pytype.
# Use absolute matrix epsilon scaling otherwise.
else:
max_ev = 1.0
eigvals, eigvecs = None, None # Unused but required by pytype.
ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-6)
def _iter_condition(state):
(i, unused_mat_m, unused_mat_h, unused_old_mat_h, error, run_step) = state
error_above_threshold = jnp.logical_and(error > error_tolerance, run_step)
return jnp.logical_and(i < num_iters, error_above_threshold)
def _iter_body(state):
(i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state
mat_m_i = (1 - alpha) * identity + alpha * mat_m
new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision)
new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision)
new_error = jnp.max(jnp.abs(new_mat_m - identity))
# sometimes error increases after an iteration before decreasing and
# converging. 1.2 factor is used to bound the maximal allowed increase.
return (i + 1, new_mat_m, new_mat_h, mat_h, new_error, new_error < error * 1.2)
if matrix_size == 1:
resultant_mat_h = (matrix + ridge_epsilon) ** alpha
error = jnp.array(0, jnp.float32)
else:
damped_matrix = matrix + ridge_epsilon * identity
z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix))
new_mat_m_0 = damped_matrix * z
new_error = jnp.max(jnp.abs(new_mat_m_0 - identity))
new_mat_h_0 = identity * jnp.power(z, 1.0 / p)
init_state = tuple([0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, True])
_, mat_m, mat_h, old_mat_h, error, convergence = lax.while_loop(
_iter_condition, _iter_body, init_state
)
error = jnp.max(jnp.abs(mat_m - identity)).astype(jnp.float32)
is_converged = jnp.asarray(convergence, old_mat_h.dtype)
resultant_mat_h = is_converged * mat_h + (1 - is_converged) * old_mat_h
resultant_mat_h = jnp.asarray(resultant_mat_h, orig_dtype)
if lobpcg_topk_precondition > 0:
# Since we deflated the top eigenvectors prior to p-th root inverse,
# the resultant matrix has larger eigenvalues associated with those
# same eigenvectors, which we need to now re-deflate.
#
# Note that _pth_root_difference returns positive values for this
# particular argument ordering as min(eigvals) <= eigvals for the
# jnp.sqrt below.
pth_diff = _pth_root_difference(ridge_epsilon, jnp.min(eigvals), eigvals, p)
scaled_vecs = eigvecs * jnp.sqrt(pth_diff)
resultant_mat_h = (
resultant_mat_h.astype(scaled_vecs.dtype)
- scaled_vecs.dot(scaled_vecs.T, precision=jax.lax.Precision.HIGHEST)
).astype(orig_dtype)
mat_m = jnp.matmul(
mat_power(resultant_mat_h, p),
original_matrix,
precision=jax.lax.Precision.HIGHEST,
)
error = jnp.max(jnp.abs(mat_m - identity)).astype(jnp.float32)
return resultant_mat_h, error | Computes `matrix^(-1/p)`, where `p` is a positive integer.
This function uses the Coupled newton iterations algorithm for
the computation of a matrix's inverse pth root.
References:
[Functions of Matrices, Theory and Computation,
Nicholas J Higham, Pg 184, Eq 7.18](
https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
Args:
matrix: the symmetric PSD matrix whose power is to be computed.
p: exponent, for p a positive integer.
num_iters: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
error_tolerance: Error indicator, useful for early termination.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
relative_matrix_epsilon: Whether to use relative epsilon to the max eigen
value when computing inverse-pth root.
lobpcg_topk_precondition: If nonzero, specifies the number of top
eigenvectors to subtract out before performing LOBPCG. Note this makes
relative_matrix_epsilon essentially free.
lobpcg_max_iter: Maximum iteration count for LOBPCG, defaults to
`lobpcg_topk_precondition`.
Returns:
matrix^(-1/p) and the error | matrix_inverse_pth_root | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
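A hedged usage sketch for matrix_inverse_pth_root on a tiny diagonal PSD matrix; the inputs are illustrative, and the function's module-level imports and dtype constant are assumed to be in scope.

import jax.numpy as jnp

a = jnp.array([[4.0, 0.0], [0.0, 9.0]])
# Inverse square root: expect roughly diag(1/2, 1/3); err is the coupled Newton
# iteration residual max|mat_m - I| and should be tiny on convergence.
root, err = matrix_inverse_pth_root(a, p=2, ridge_epsilon=1e-12)
print(root)
print(float(err))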
def merge_small_dims(shape_to_merge, max_dim):
"""Merge small dimensions.
If there are some small dimensions, we collapse them:
e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if max_dim = 1024
[1, 2, 768, 1, 2048] --> [2, 768, 2048]
Args:
shape_to_merge: Shape to merge small dimensions.
max_dim: Maximal dimension of output shape used in merging.
Returns:
Merged shape.
"""
if shape_to_merge and np.all(np.array(shape_to_merge) == 1):
return [1]
resulting_shape = []
product = 1
for d in shape_to_merge:
if product * d <= max_dim:
product *= d
else:
if product > 1:
resulting_shape.append(product)
product = d
if product > 1:
resulting_shape.append(product)
return resulting_shape | Merge small dimensions.
If there are some small dimensions, we collapse them:
e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if max_dim = 1024
[1, 2, 768, 1, 2048] --> [2, 768, 2048]
Args:
shape_to_merge: Shape to merge small dimensions.
max_dim: Maximal dimension of output shape used in merging.
Returns:
Merged shape. | merge_small_dims | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
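A hedged sketch reproducing the docstring examples of merge_small_dims, assuming the function above and its numpy import are in scope.

print(merge_small_dims([1, 2, 512, 1, 2048, 1, 3, 4], max_dim=1024))  # [1024, 2048, 12]
print(merge_small_dims([1, 2, 768, 1, 2048], max_dim=1024))           # [2, 768, 2048]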
def pad_square_matrix(mat, max_size):
"""Pad a square matrix up to max_size.
Args:
mat: a matrix to pad.
max_size: matrix size requested.
Returns:
Given M returns [[M, 0], [0, I]]
"""
rows, cols = mat.shape
if rows != cols:
raise ValueError(
f"Must have rows == cols, instead got rows={rows}, cols={cols}"
)
if cols > max_size:
raise ValueError(
f"Must have cols <= max_size. Instead got cols={cols}, max_size={max_size}."
)
if rows == max_size:
return mat
pad_size = max_size - rows
zs1 = jnp.zeros([rows, pad_size], dtype=mat.dtype)
zs2 = jnp.zeros([pad_size, rows], dtype=mat.dtype)
eye = jnp.eye(pad_size, dtype=mat.dtype)
mat = jnp.concatenate([mat, zs1], 1)
mat = jnp.concatenate([mat, jnp.concatenate([zs2, eye], 1)], 0)
return mat | Pad a square matrix up to max_size.
Args:
mat: a matrix to pad.
max_size: matrix size requested.
Returns:
Given M returns [[M, 0], [0, I]] | pad_square_matrix | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
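A hedged usage sketch for pad_square_matrix with an illustrative 2x2 input.

import jax.numpy as jnp

m = jnp.array([[1.0, 2.0], [3.0, 4.0]])
# Expect [[M, 0], [0, I]]: the input in the top-left block, identity bottom-right.
print(pad_square_matrix(m, max_size=4))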
def make_sliced_padding(
symmetric_block_size,
num_blocks,
starting_block,
dtype,
):
"""Returns padding for symmetric block matrix.
Specifically, the padding is given concatenated rectangular matrices
representing the lower-triangular rows below the starting block. For example,
if we want to pad the symmetric matrix
M = [[A, B^T]
[B, C]],
the desired output (in terms of the full matrix) with num_blocks = 4 is
M_padded = [[A, B^T, 0, 0]
[B, C, 0, 0]
[0, 0, I, 0]
    [0, 0, 0, I]].
We would represent M as the block matrix mat = [A, B, C]. In this form, the
additional padding to provide has form [0, 0, I, 0, 0, 0, I] (only the lower
triangular parts in the third and fourth rows).
Args:
symmetric_block_size: The size of each block.
num_blocks: The total number of blocks.
starting_block: The block where to start the padding.
dtype: The type to use for the blocks.
"""
if starting_block == num_blocks:
return jnp.zeros(shape=(symmetric_block_size, 0), dtype=dtype)
blocks = []
for i in range(starting_block, num_blocks):
blocks.append(
jnp.zeros(
shape=(symmetric_block_size, symmetric_block_size * i), dtype=dtype
)
)
blocks.append(jnp.eye(symmetric_block_size, dtype=dtype))
return jnp.concatenate(blocks, axis=-1) | Returns padding for symmetric block matrix.
Specifically, the padding is given concatenated rectangular matrices
representing the lower-triangular rows below the starting block. For example,
if we want to pad the symmetric matrix
M = [[A, B^T]
[B, C]],
the desired output (in terms of the full matrix) with num_blocks = 4 is
M_padded = [[A, B^T, 0, 0]
[B, C, 0, 0]
[0, 0, I, 0]
[0, 0, 0, I]].
We would represent M as the block matrix mat = [A, B, C]. In this form, the
additional padding to provide has form [0, 0, I, 0, 0, 0, I] (only the lower
triangular parts in the third and fourth rows).
Args:
symmetric_block_size: The size of each block.
num_blocks: The total number of blocks.
starting_block: The block where to start the padding.
dtype: The type to use for the blocks. | make_sliced_padding | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
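A hedged shape check for make_sliced_padding with illustrative arguments: block size 2, three block rows in total, padding starting at block row 1.

import jax.numpy as jnp

pad = make_sliced_padding(
    symmetric_block_size=2, num_blocks=3, starting_block=1, dtype=jnp.float32
)
# One row of [0 (2x2), I (2x2)] and one of [0 (2x4), I (2x2)], concatenated: (2, 10).
print(pad.shape)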
def pad_block_symmetric_matrix(
mat,
symmetric_block_size,
max_num_blocks,
):
"""Returns the padded blocked symmetric matrix.
The size of the padded matrix will be:
[symmetric_block_size, symmetric_block_size * max_num_blocks]
The input matrix can either:
- Be square with size less or equal to symmetric_block_size. In this case,
mat will first be padded to a square matrix of size symmetric_block_size,
and then be padded again up to the full size of the blocked matrix.
- Be a rectangle with number of rows equal to block size.
In this case, number of columns must be a multiple of number of rows, and
the ratio must correspond to a block representation of a symmetric matrix.
That is, the ratio must have form x * (x + 1) / 2. Here, x represents the
number of block rows represented by the matrix.
Args:
mat: The input block matrix.
symmetric_block_size: The size of blocks.
max_num_blocks: The largest number of blocks to pad to.
"""
rows, cols = mat.shape
if rows > symmetric_block_size:
raise ValueError(
"Must have rows <= symmetric_block_size. Instead got "
f"rows={rows}, symmetric_block_size={symmetric_block_size}."
)
if rows > cols:
raise ValueError(
f"Must have rows <= cols, instead got rows={rows}, cols={cols}."
)
if cols > symmetric_block_size * max_num_blocks:
raise ValueError(
"Must have cols <= symmetric_block_size * max_num_blocks "
f"Instead got cols={cols}, "
f"symmetric_block_size={symmetric_block_size}, "
f"max_num_blocks={max_num_blocks}."
)
if rows < symmetric_block_size:
mat = pad_square_matrix(mat, max_size=symmetric_block_size)
# Update rows and cols after possibly padding in pad_square_matrix.
rows, cols = mat.shape
assert rows == symmetric_block_size
assert cols % rows == 0
filled_blocks = cols // rows
padding_blocks = make_sliced_padding(
symmetric_block_size=symmetric_block_size,
num_blocks=symmetric_matrices.num_blocks_from_total_blocks(max_num_blocks),
starting_block=symmetric_matrices.num_blocks_from_total_blocks(filled_blocks),
dtype=mat.dtype,
)
return jnp.concatenate([mat, padding_blocks], axis=-1) | Returns the padded blocked symmetric matrix.
The size of the padded matrix will be:
[symmetric_block_size, symmetric_block_size * max_num_blocks]
The input matrix can either:
- Be square with size less or equal to symmetric_block_size. In this case,
mat will first be padded to a square matrix of size symmetric_block_size,
and then be padded again up to the full size of the blocked matrix.
- Be a rectangle with number of rows equal to block size.
In this case, number of columns must be a multiple of number of rows, and
the ratio must correspond to a block representation of a symmetric matrix.
That is, the ratio must have form x * (x + 1) / 2. Here, x represents the
number of block rows represented by the matrix.
Args:
mat: The input block matrix.
symmetric_block_size: The size of blocks.
max_num_blocks: The largest number of blocks to pad to. | pad_block_symmetric_matrix | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
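A hedged shape check for pad_block_symmetric_matrix, assuming the symmetric_matrices helpers imported by this module are available; the 2x2 input and block sizes are illustrative.

import jax.numpy as jnp

m = jnp.array([[1.0, 2.0], [2.0, 3.0]])
# One filled block of size 2 padded out to max_num_blocks=3 (two block rows): (2, 6).
padded = pad_block_symmetric_matrix(m, symmetric_block_size=2, max_num_blocks=3)
print(padded.shape)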
def pad_vector(vec, max_size):
"""Pad a vector to a max_size.
Args:
vec: a vector to pad.
    max_size: vector size requested.
Returns:
Given V returns [V, 0]
"""
size = vec.shape[0]
assert size <= max_size
if size == max_size:
return vec
pad_size = max_size - size
zs1 = jnp.zeros([pad_size], dtype=vec.dtype)
return jnp.concatenate([vec, zs1], 0) | Pad a vector to a max_size.
Args:
vec: a vector to pad.
max_size: vector size requested.
Returns:
Given V returns [V, 0] | pad_vector | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
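A hedged one-liner for pad_vector with an illustrative input.

import jax.numpy as jnp

print(pad_vector(jnp.array([1.0, 2.0, 3.0]), max_size=5))  # [1. 2. 3. 0. 0.]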
def efficient_cond(predicate, compute_fn, init_state, *args, **kwargs):
"""Avoids wasteful buffer allocation with XLA."""
def _iter_body(unused_state):
results = compute_fn(*args, **kwargs)
return tuple([False] + list(results))
def _iter_condition(state):
return state[0]
results = jax.lax.while_loop(
_iter_condition, _iter_body, tuple([predicate] + init_state)
)
return tuple(results[1:]) | Avoids wasteful buffer allocation with XLA. | efficient_cond | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
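A hedged usage sketch for efficient_cond; compute_fn and the states below are made up, and both branches must return matching shapes and dtypes.

import jax.numpy as jnp

def compute_fn(x):
    # Must return a sequence with the same structure/dtypes as init_state.
    return [x + 1.0, x * 2.0]

init_state = [jnp.asarray(0.0), jnp.asarray(0.0)]
print(efficient_cond(True, compute_fn, init_state, jnp.asarray(3.0)))   # (4.0, 6.0)
print(efficient_cond(False, compute_fn, init_state, jnp.asarray(3.0)))  # (0.0, 0.0)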