docstring | function | __index_level_0__
---|---|---
Reads DFTB+ structure files in gen format.
Args:
filename: name of the gen-file to be read
Returns:
atoms: an object of the phonopy.Atoms class, representing the structure
found in filename | def read_dftbp(filename):
infile = open(filename, 'r')
lines = infile.readlines()
infile.close()
# remove any comment lines (build a filtered copy; removing items from a
# list while iterating over it skips elements)
lines = [ss for ss in lines if not ss.strip().startswith('#')]
natoms = int(lines[0].split()[0])
symbols = lines[1].split()
if (lines[0].split()[1].lower() == 'f'):
is_scaled = True
scale_pos = 1
scale_latvecs = dftbpToBohr
else:
is_scaled = False
scale_pos = dftbpToBohr
scale_latvecs = dftbpToBohr
# assign positions and expanded symbols
positions = []
expanded_symbols = []
for ii in range(2, natoms+2):
lsplit = lines[ii].split()
expanded_symbols.append(symbols[int(lsplit[1]) - 1])
positions.append([float(ss)*scale_pos for ss in lsplit[2:5]])
# origin is ignored, may be used in future
origin = [float(ss) for ss in lines[natoms+2].split()]
# assign coords of unitcell
cell = []
for ii in range(natoms+3, natoms+6):
lsplit = lines[ii].split()
cell.append([float(ss)*scale_latvecs for ss in lsplit[:3]])
cell = np.array(cell)
if is_scaled:
atoms = Atoms(symbols=expanded_symbols,
cell=cell,
scaled_positions=positions)
else:
atoms = Atoms(symbols=expanded_symbols,
cell=cell,
positions=positions)
return atoms | 210,914 |
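A minimal usage sketch for the reader above and the matching writer below, assuming this module's imports (np, Atoms, dftbpToBohr) are in scope; the file names are hypothetical.

```python
# Hypothetical round trip: read a gen file, then write it back out in the
# absolute-coordinate ('S') format produced by write_dftbp below.
atoms = read_dftbp("geo.gen")
write_dftbp("geo_copy.gen", atoms)
```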
Reduces expanded list of symbols.
Args:
symbols: list containing any chemical symbols as often as
the atom appears in the structure
Returns:
reduced_symbols: list in which each symbol appears only once | def get_reduced_symbols(symbols):
reduced_symbols = []
for ss in symbols:
if not (ss in reduced_symbols):
reduced_symbols.append(ss)
return reduced_symbols | 210,915 |
Writes DFTB+ readable, gen-formatted structure files
Args:
filename: name of the gen-file to be written
atoms: object containing information about structure | def write_dftbp(filename, atoms):
scale_pos = dftbpToBohr
lines = ""
# 1. line, use absolute positions
natoms = atoms.get_number_of_atoms()
lines += str(natoms)
lines += ' S \n'
# 2. line
expanded_symbols = atoms.get_chemical_symbols()
symbols = get_reduced_symbols(expanded_symbols)
lines += ' '.join(symbols) + '\n'
atom_numbers = []
for ss in expanded_symbols:
atom_numbers.append(symbols.index(ss) + 1)
positions = atoms.get_positions()/scale_pos
for ii in range(natoms):
pos = positions[ii]
pos_str = "{:3d} {:3d} {:20.15f} {:20.15f} {:20.15f}\n".format(
ii + 1, atom_numbers[ii], pos[0], pos[1], pos[2])
lines += pos_str
# origin is arbitrary
lines += '0.0 0.0 0.0\n'
cell = atoms.get_cell()/scale_pos
for ii in range(3):
cell_str = "{:20.15f} {:20.15f} {:20.15f}\n".format(
cell[ii][0], cell[ii][1], cell[ii][2])
lines += cell_str
outfile = open(filename, 'w')
outfile.write(lines)
outfile.close() | 210,916 |
Writes perfect supercell and supercells with displacements
Args:
supercell: perfect supercell
cells_with_disps: supercells with displaced atoms
filename: root-filename | def write_supercells_with_displacements(supercell, cells_with_disps, filename="geo.gen"):
# original cell
write_dftbp(filename + "S", supercell)
# displaced cells
for ii in range(len(cells_with_disps)):
write_dftbp(filename + "S-{:03d}".format(ii+1), cells_with_disps[ii]) | 210,917 |
Construct the parser.
Args:
string_or_filelike: Either the string to parse, or a file-like object
supporting the readline method.
parser_delegate: An instance of the ParserDelegate class, that will be
responsible for constructing appropriate objects for configurable
references and macros. | def __init__(self, string_or_filelike, parser_delegate):
if hasattr(string_or_filelike, 'readline'):
line_reader = string_or_filelike.readline
else: # Assume it's string-like.
if six.PY2:
string_or_filelike = unicode(string_or_filelike)
string_io = io.StringIO(string_or_filelike)
line_reader = string_io.readline
def _text_line_reader():
line = line_reader()
if isinstance(line, bytes):
line = line.decode('utf8')
return line
self._token_generator = tokenize.generate_tokens(_text_line_reader)
self._filename = getattr(string_or_filelike, 'name', None)
self._current_token = None
self._delegate = parser_delegate
self._advance_one_token() | 211,024 |
Returns `value` in a format parseable by `parse_value`, or `None`.
Simply put, this function ensures that when it returns a string value, the
following will hold:
parse_value(_format_value(value)) == value
Args:
value: The value to format.
Returns:
A string representation of `value` when `value` is literally representable,
or `None`. | def _format_value(value):
literal = repr(value)
try:
if parse_value(literal) == value:
return literal
except SyntaxError:
pass
return None | 211,051 |
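A short sketch of the round-trip guarantee, assuming this module's parse_value is in scope alongside _format_value:

```python
assert parse_value(_format_value(3)) == 3
assert parse_value(_format_value({'a': [1, 2]})) == {'a': [1, 2]}
assert _format_value(object()) is None  # repr() output is not parseable back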
Clears the global configuration.
This clears any parameter values set by `bind_parameter` or `parse_config`, as
well as the set of dynamically imported modules. It does not remove any
configurable functions or classes from the registry of configurables.
Args:
clear_constants: Whether to clear constants created by `constant`. Defaults
to False. | def clear_config(clear_constants=False):
_set_config_is_locked(False)
_CONFIG.clear()
_SINGLETONS.clear()
if clear_constants:
_CONSTANTS.clear()
else:
saved_constants = _CONSTANTS.copy()
_CONSTANTS.clear() # Clear then redefine constants (re-adding bindings).
for name, value in six.iteritems(saved_constants):
constant(name, value)
_IMPORTED_MODULES.clear()
_OPERATIVE_CONFIG.clear() | 211,052 |
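A minimal sketch of the clearing behavior, assuming a registered configurable (my_fn is hypothetical):

```python
import gin

@gin.configurable
def my_fn(x=0):
    return x

gin.bind_parameter('my_fn.x', 1)
assert my_fn() == 1
gin.clear_config()          # binding removed; constants survive by default
assert my_fn() == 0
```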
Returns True if `arg_name` might be a valid parameter for `fn_or_cls`.
Specifically, this means that `fn_or_cls` either has a parameter named
`arg_name`, or has a `**kwargs` parameter.
Args:
fn_or_cls: The function or class to check.
arg_name: The name of the parameter.
Returns:
Whether `arg_name` might be a valid argument of `fn_or_cls`. | def _might_have_parameter(fn_or_cls, arg_name):
if inspect.isclass(fn_or_cls):
fn = _find_class_construction_fn(fn_or_cls)
else:
fn = fn_or_cls
while hasattr(fn, '__wrapped__'):
fn = fn.__wrapped__
arg_spec = _get_cached_arg_spec(fn)
if six.PY3:
if arg_spec.varkw:
return True
return arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs
else:
if arg_spec.keywords:
return True
return arg_name in arg_spec.args | 211,055 |
Retrieve all default values for configurable parameters of a function.
Any parameters included in the supplied blacklist, or not included in the
supplied whitelist, are excluded.
Args:
fn: The function whose parameter values should be retrieved.
whitelist: The whitelist (or `None`) associated with the function.
blacklist: The blacklist (or `None`) associated with the function.
Returns:
A dictionary mapping configurable parameter names to their default values. | def _get_default_configurable_parameter_values(fn, whitelist, blacklist):
arg_vals = _ARG_DEFAULTS_CACHE.get(fn)
if arg_vals is not None:
return arg_vals.copy()
# First, grab any default values not captured in the kwargs var.
arg_spec = _get_cached_arg_spec(fn)
if arg_spec.defaults:
default_kwarg_names = arg_spec.args[-len(arg_spec.defaults):]
arg_vals = dict(zip(default_kwarg_names, arg_spec.defaults))
else:
arg_vals = {}
if six.PY3 and arg_spec.kwonlydefaults:
arg_vals.update(arg_spec.kwonlydefaults)
# Now, eliminate keywords that are blacklisted, or aren't whitelisted (if
# there's a whitelist), or aren't representable as a literal value.
for k in list(six.iterkeys(arg_vals)):
whitelist_fail = whitelist and k not in whitelist
blacklist_fail = blacklist and k in blacklist
representable = _is_literally_representable(arg_vals[k])
if whitelist_fail or blacklist_fail or not representable:
del arg_vals[k]
_ARG_DEFAULTS_CACHE[fn] = arg_vals
return arg_vals.copy() | 211,060 |
Parse a Gin config file.
Args:
config_file: The path to a Gin config file.
skip_unknown: A boolean indicating whether unknown configurables and imports
should be skipped instead of causing errors (alternatively a list of
configurable names to skip if unknown). See `parse_config` for additional
details.
Raises:
IOError: If `config_file` cannot be read using any registered file reader. | def parse_config_file(config_file, skip_unknown=False):
for reader, existence_check in _FILE_READERS:
if existence_check(config_file):
with reader(config_file) as f:
parse_config(f, skip_unknown=skip_unknown)
return
raise IOError('Unable to open file: {}'.format(config_file)) | 211,068 |
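A sketch of how the _FILE_READERS loop is extended in practice, via gin's register_file_reader hook; the reader names are hypothetical and the exact registration signature is an assumption.

```python
import os
import gin

def my_open(path):
    # returns a file-like object for parse_config to consume
    return open(path, 'r')

def my_exists(path):
    # existence check used to pick this reader
    return os.path.exists(path)

gin.config.register_file_reader(my_open, my_exists)  # assumed two-arg form
gin.parse_config_file('config.gin')                  # hypothetical path
```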
Provides an iterator over references in the given config.
Args:
config: A dictionary mapping scoped configurable names to argument bindings.
to: If supplied, only yield references whose `configurable_fn` matches `to`.
Yields:
`ConfigurableReference` instances within `config`, possibly restricted to those
matching the `to` parameter if it is supplied. | def iterate_references(config, to=None):
for value in _iterate_flattened_values(config):
if isinstance(value, ConfigurableReference):
if to is None or value.configurable.fn_or_cls == to:
yield value | 211,073 |
Decorator for an enum class that generates Gin constants from values.
Generated constants have format `module.ClassName.ENUM_VALUE`. The module
name is optional when using the constant.
Args:
cls: Class type.
module: The module to associate with the constants, to help handle naming
collisions. If `None`, `cls.__module__` will be used.
Returns:
Class type (identity function).
Raises:
TypeError: When applied to a non-enum class. | def constants_from_enum(cls, module=None):
if not issubclass(cls, enum.Enum):
raise TypeError("Class '{}' is not subclass of enum.".format(cls.__name__))
if module is None:
module = cls.__module__
for value in cls:
constant('{}.{}'.format(module, str(value)), value)
return cls | 211,077 |
Associates a value with `complete_selector`.
This function also performs some additional bookkeeping to facilitate
partial matching of selectors.
Args:
complete_selector: The (complete) selector to associate a value with.
value: The value to associate.
Raises:
ValueError: If `complete_selector` isn't a string consisting of valid
Python identifiers separated by periods. | def __setitem__(self, complete_selector, value):
if not SELECTOR_RE.match(complete_selector):
raise ValueError("Invalid selector '{}'.".format(complete_selector))
selector_components = complete_selector.split('.')
node = self._selector_tree
# Iterate backwards over the components of the selector.
for component in selector_components[::-1]:
node = node.setdefault(component, {})
node[_TERMINAL_KEY] = complete_selector
self._selector_map[complete_selector] = value | 211,088 |
Returns the minimal selector that uniquely matches `complete_selector`.
Args:
complete_selector: A complete selector stored in the map.
Returns:
A partial selector that unambiguously matches `complete_selector`.
Raises:
KeyError: If `complete_selector` is not in the map. | def minimal_selector(self, complete_selector):
if complete_selector not in self._selector_map:
raise KeyError("No value with selector '{}'.".format(complete_selector))
selector_components = complete_selector.split('.')
node = self._selector_tree
start = None
for i, component in enumerate(reversed(selector_components)):
if len(node) == 1:
if start is None:
start = -i # Negative index, since we're iterating in reverse.
else:
start = None
node = node[component]
if len(node) > 1: # The selector is a substring of another selector.
return complete_selector
return '.'.join(selector_components[start:]) | 211,092 |
Returns a list of the generated/transformed columns.
Arguments:
X_original: df
the original (input) DataFrame.
X_transformed: df
the transformed (current) DataFrame.
to_transform: [str]
a list of columns that were transformed (as in the original DataFrame), commonly self.cols.
Output:
a list of columns that were transformed (as in the current DataFrame). | def get_generated_cols(X_original, X_transformed, to_transform):
original_cols = list(X_original.columns)
if len(to_transform) > 0:
for c in to_transform:
original_cols.remove(c)
current_cols = list(X_transformed.columns)
if len(original_cols) > 0:
for c in original_cols:
current_cols.remove(c)
return current_cols | 212,757 |
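A small worked example, assuming pandas and a hypothetical one-hot encoding of the 'city' column:

```python
import pandas as pd

X_orig = pd.DataFrame({'city': ['a', 'b'], 'age': [30, 40]})
X_trans = pd.DataFrame({'age': [30, 40], 'city_a': [1, 0], 'city_b': [0, 1]})
get_generated_cols(X_orig, X_trans, ['city'])  # -> ['city_a', 'city_b']
```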
Download a file to a local path
Args:
- url(string): url request path
- target_path(string): download destination | def http_download(url, target_path):
r = requests.get(url, stream=True)
with open(target_path, 'wb') as f:
# shutil.copyfileobj(resp, f)
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return target_path | 212,944 |
Take an iOS screenshot
Args:
- filename(string): optional
Returns:
PIL.Image object | def screenshot(self, filename=None):
image = self.d.screenshot()
if self.rotation:
method = getattr(Image, 'ROTATE_{}'.format(self.rotation*90))
image = image.transpose(method)
if filename:
image.save(filename)
return image | 212,971 |
Simulate click operation
Args:
- x (int): position of x
- y (int): position of y
Returns:
self | def click(self, x, y):
self._run_nowait('target.tap({x: %d, y: %d})' % (x/self._scale, y/self._scale))
return self | 212,972 |
Simulate click within window screen.
Args:
x, y: int, pixel distance from window (left, top) as origin
Returns:
None | def click(self, x, y):
print('click at', x, y)
self._input_left_mouse(x, y) | 212,993 |
Connect to a device, and return its object
Args:
platform: string, one of <android|ios|windows|webdriver|dummy>
Returns:
device instance of the matched platform class
Raises:
SyntaxError, EnvironmentError | def connect(*args, **kwargs):
connect_url = _connect_url(*args)
platform = kwargs.pop('platform', _detect_platform(connect_url))
cls = None
if platform == 'android':
os.environ['JSONRPC_TIMEOUT'] = "60" # default is 90s which is too long.
devcls = __import__('atx.drivers.android')
cls = devcls.drivers.android.AndroidDevice
elif platform == 'windows':
devcls = __import__('atx.drivers.windows')
cls = devcls.drivers.windows.WindowsDevice
elif platform == 'ios':
devcls = __import__('atx.drivers.ios_webdriveragent')
cls = devcls.drivers.ios_webdriveragent.IOSDevice
elif platform == 'webdriver':
devcls = __import__('atx.drivers.webdriver')
cls = devcls.drivers.webdriver.WebDriver
elif platform == 'dummy': # for py.test use
devcls = __import__('atx.drivers.dummy')
cls = devcls.drivers.dummy.DummyDevice
if cls is None:
raise SyntaxError('Platform: %s not exists' % platform)
c = cls(connect_url, **kwargs)
c.platform = platform
return c | 213,026 |
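A hedged usage sketch (the serial number and file name are hypothetical):

```python
import atx

d = atx.connect()                                # auto-detect the platform
d = atx.connect('EFGH123', platform='android')   # or pin it explicitly
d.screenshot('screen.png')
```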
Start app by bundle_id
Args:
- bundle_id(string): ex com.netease.my
Returns:
idevicedebug subprocess instance | def start_app(self, bundle_id):
idevicedebug = must_look_exec('idevicedebug')
# run in background
kwargs = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE}
if sys.platform != 'darwin':
kwargs['close_fds'] = True
return subprocess.Popen([idevicedebug, "--udid", self.udid, 'run', bundle_id], **kwargs) | 213,058 |
Delay some seconds
Args:
secs: seconds to sleep (truncated to an integer)
Returns:
self | def delay(self, secs):
secs = int(secs)
for i in reversed(range(secs)):
sys.stdout.write('\r')
sys.stdout.write("sleep %ds, left %2ds" % (secs, i+1))
sys.stdout.flush()
time.sleep(1)
sys.stdout.write("\n")
return self | 213,065 |
Set region of the screen area
Args:
bounds: Bounds object
Returns:
A new AndroidDevice object
Raises:
TypeError | def region(self, bounds):
if not isinstance(bounds, Bounds):
raise TypeError("region param bounds must be isinstance of Bounds")
_d = copy.copy(self)
_d._bounds = bounds
return _d | 213,073 |
Take screen snapshot
Args:
- filename: filename where save to, optional
Returns:
PIL.Image object
Raises:
TypeError, IOError | def screenshot(self, filename=None):
if self.__keep_screen:
return self.__last_screen
try:
screen = self._take_screenshot()
except IOError:
# try taking the screenshot again
log.warn("warning, screenshot failed [1/2], retry again")
screen = self._take_screenshot()
self.__last_screen = screen
if filename:
save_dir = os.path.dirname(filename) or '.'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
screen.save(filename)
return screen | 213,076 |
Return immediately if no image found
Args:
- pattern (str or Pattern): filename or an opencv image object.
- action (str): click or long_click
Returns:
Click point or None | def click_nowait(self, pattern, action='click', desc=None, **match_kwargs):
point = self.match(pattern, **match_kwargs)
if not point or not point.matched:
return None
func = getattr(self, action)
func(*point.pos)
return point | 213,078 |
Unix-style output; \r\n is already replaced with \n
Args:
- timeout (float): timeout for a command exec | def run_cmd(self, *args, **kwargs):
timeout = kwargs.pop('timeout', None)
p = self.raw_cmd(*args, **kwargs)
return p.communicate(timeout=timeout)[0].decode('utf-8').replace('\r\n', '\n') | 213,104 |
Uninstall package
Args:
- package_name(string): package name ex: com.example.demo
- keep_data(bool): keep the data and cache directories | def app_uninstall(self, package_name, keep_data=False):
if keep_data:
return self.run_cmd('uninstall', '-k', package_name)
else:
return self.run_cmd('uninstall', package_name) | 213,107 |
Take device screenshot
Args:
- filename(string): optional, save into filename
- scale(float): scale size
- method(string): one of minicap,screencap
Return:
PIL.Image | def screenshot(self, filename=None, scale=1.0, method=None):
image = None
method = method or self._screenshot_method
if method == 'minicap':
try:
image = self._adb_minicap(scale)
except Exception as e:
logger.warn("use minicap failed, fallback to screencap. error detail: %s", e)
self._screenshot_method = 'screencap'
return self.screenshot(filename=filename, scale=scale)
elif method == 'screencap':
image = self._adb_screencap(scale)
else:
raise RuntimeError("No such method(%s)" % method)
if filename:
image.save(filename)
return image | 213,113 |
Initial AndroidDevice
Args:
serial (str): serial or wlan ip
Returns:
AndroidDevice object
Raises:
EnvironmentError | def __init__(self, serial=None, **kwargs):
self.__display = None
serial = serial or getenvs('ATX_ADB_SERIALNO', 'ANDROID_SERIAL')
self._host = kwargs.get('host') or getenvs(
'ATX_ADB_HOST', 'ANDROID_ADB_SERVER_HOST') or '127.0.0.1'
self._port = int(kwargs.get('port') or getenvs(
'ATX_ADB_PORT', 'ANDROID_ADB_SERVER_PORT') or 5037)
self._adb_client = adbkit.Client(self._host, self._port)
self._adb_device = self._adb_client.device(serial)
# self._adb_shell_timeout = 30.0 # max adb shell exec time
# uiautomator2
self._uiauto = uiautomator2.connect_usb(serial)
if not self._uiauto.alive:
self._uiauto.healthcheck(unlock=False)
DeviceMixin.__init__(self)
self._randid = base.id_generator(5)
self.screen_rotation = None
# inherited from atx-uiautomator
self.swipe = self._uiauto.swipe
self.drag = self._uiauto.drag
self.press = self._uiauto.press
self.long_click = self._uiauto.long_click
self.dump = self._uiauto.dump_hierarchy | 213,173 |
Forward device port to local
Args:
device_port: port inside device
local_port: port on the PC; if None, a random free port is picked.
Returns:
tuple, (host, local_port) | def forward(self, device_port, local_port=None):
port = self._adb_device.forward(device_port, local_port)
return (self._host, port) | 213,174 |
Run adb command, for example: adb(['pull', '/data/local/tmp/a.png'])
Args:
command: string or list of string
Returns:
command output | def adb_cmd(self, command, **kwargs):
kwargs['timeout'] = kwargs.get('timeout', self._adb_shell_timeout)
if isinstance(command, list) or isinstance(command, tuple):
return self.adb_device.run_cmd(*list(command), **kwargs)
return self.adb_device.run_cmd(command, **kwargs) | 213,179 |
Stop application
Args:
package_name: string like com.example.app1
clear: bool, remove user data
Returns:
self | def stop_app(self, package_name, clear=False):
if clear:
self.adb_shell(['pm', 'clear', package_name])
else:
self.adb_shell(['am', 'force-stop', package_name])
return self | 213,182 |
Mark a point
Args:
- img(numpy): the source image
- x, y(int): position | def mark_point(img, x, y):
overlay = img.copy()
output = img.copy()
alpha = 0.5
radius = max(5, min(img.shape[:2])//15)
center = int(x), int(y)
color = (0, 0, 255)
cv2.circle(overlay, center, radius, color, -1)
cv2.addWeighted(overlay, alpha, output, 1-alpha, 0, output)
return output | 213,195 |
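A quick usage sketch with a synthetic image (the file name is hypothetical):

```python
import numpy as np
import cv2

img = np.zeros((200, 200, 3), dtype=np.uint8)   # blank black canvas
marked = mark_point(img, 100, 100)              # translucent red circle
cv2.imwrite('marked.png', marked)
```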
Start an application
Args:
- bundle_id: (string) apk bundle ID
Returns:
WDA session object | def start_app(self, bundle_id):
# if self._session is not None:
# self.stop_app()
self._bundle_id = bundle_id
self._session = self._wda.session(bundle_id)
return self._session | 213,199 |
Simulate click operation
Args:
x, y(int): position | def do_tap(self, x, y):
rx, ry = x/self.scale, y/self.scale
self.session.tap(rx, ry) | 213,205 |
Simulate swipe operation
Args:
x1, y1(int): from position
x2, y2(int): to position
duration(float): swipe duration, unit seconds | def swipe(self, x1, y1, x2, y2, duration=0.5):
scale = self.scale
x1, y1, x2, y2 = x1/scale, y1/scale, x2/scale, y2/scale
self.session.swipe(x1, y1, x2, y2, duration) | 213,206 |
Take a screenshot, also called by Mixin
Returns:
PIL Image object | def _take_screenshot(self):
raw_png = self._wda.screenshot()
img = Image.open(BytesIO(raw_png))
return img | 213,207 |
This is different from _save_screenshot.
The return value may be None or the screenshot path
Args:
screenshot: bool or PIL image | def _take_screenshot(self, screenshot=False, name_prefix='unknown'):
if isinstance(screenshot, bool):
if not screenshot:
return
return self._save_screenshot(name_prefix=name_prefix)
if isinstance(screenshot, Image.Image):
return self._save_screenshot(screen=screenshot, name_prefix=name_prefix)
raise TypeError("invalid type for func _take_screenshot: " + str(type(screenshot))) | 213,281 |
Check that v1 equals v2, and take a screenshot if they differ
Args:
- desc (str): some description
- safe (bool): suppress AssertionError if set to True
- screenshot: can be type <None|True|False|PIL.Image> | def assert_equal(self, v1, v2, **kwargs):#, desc=None, screenshot=False, safe=False):
is_success = v1 == v2
if is_success:
message = "assert equal success, %s == %s" %(v1, v2)
else:
message = '%s not equal %s' % (v1, v2)
kwargs.update({
'message': message,
'success': is_success,
})
self._add_assert(**kwargs) | 213,284 |
Assert if image exists
Args:
- pattern: image filename # not support pattern for now
- timeout (float): seconds
- safe (bool): do not raise AssertionError even when the check fails. | def assert_image_exists(self, pattern, timeout=20.0, **kwargs):
pattern = self.d.pattern_open(pattern)
match_kwargs = kwargs.copy()
match_kwargs.pop('safe', None)
match_kwargs.update({
'timeout': timeout,
'safe': True,
})
res = self.d.wait(pattern, **match_kwargs)
is_success = res is not None
message = 'assert image exists'
if res:
x, y = res.pos
kwargs['position'] = {'x': x, 'y': y}
message = 'image exists\npos %s\nconfidence=%.2f\nmethod=%s' % (res.pos, res.confidence, res.method)
else:
res = self.d.match(pattern)
if res is None:
message = 'Image not found'
else:
th = kwargs.get('threshold') or pattern.threshold or self.image_match_threshold
message = 'Matched: %s\nPosition: %s\nConfidence: %.2f\nThreshold: %.2f' % (
res.matched, res.pos, res.confidence, th)
kwargs['target'] = self._save_screenshot(pattern, name_prefix='target')
kwargs['screenshot'] = self.last_screenshot
kwargs.update({
'action': 'assert_image_exists',
'message': message,
'success': is_success,
})
self._add_assert(**kwargs) | 213,285 |
For Android & IOS
Args:
- ui: need have property "exists"
- desc (str): description
- safe (bool): suppress AssertionError if set to True
- screenshot: can be type <None|True|False|PIL.Image>
- platform (str, default:android): android | ios | def assert_ui_exists(self, ui, **kwargs):
is_success = ui.exists
if is_success:
if kwargs.get('screenshot') is not None:
if self.d.platform == 'android':
bounds = ui.info['bounds'] # For android only.
kwargs['position'] = {
'x': (bounds['left']+bounds['right'])//2,
'y': (bounds['top']+bounds['bottom'])//2,
}
elif self.d.platform == 'ios':
bounds = ui.bounds # For iOS only.
kwargs['position'] = {
'x': self.d.scale*(bounds.x+bounds.width//2),
'y': self.d.scale*(bounds.y+bounds.height//2),
}
message = 'UI exists'
else:
message = 'UI not exists'
kwargs.update({
'message': message,
'success': is_success,
})
self._add_assert(**kwargs) | 213,286 |
Initializer.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation. | def __init__(self, mtf_graph, mesh_shape, mtf_outputs=()):
self.mtf_graph = mtf_graph
self.mesh_shape = mesh_shape
self.mtf_outputs = mtf_outputs
self._layout_validator = None # valid_layouts.LayoutValidator
self._graph_interface = None | 213,444 |
Configurable layer stack.
Args:
layers: a list of subclasses of TransformerLayer
num_layers: an integer
Returns:
a LayerStack | def make_layer_stack(layers=gin.REQUIRED, num_layers=6):
return LayerStack([cls() for cls in layers] * num_layers) | 213,448 |
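A sketch of the tiling behavior with stand-in classes (names hypothetical; real callers pass TransformerLayer subclasses, typically via gin, and this assumes the module's LayerStack accepts them):

```python
class SelfAttention:      # stand-in for a TransformerLayer subclass
    pass

class DenseReluDense:     # stand-in for a TransformerLayer subclass
    pass

stack = make_layer_stack(layers=[SelfAttention, DenseReluDense], num_layers=2)
# The stack holds 4 entries [sa, ff, sa, ff]; the two instances are created
# once and then tiled, so repeated entries alias the same objects.
```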
Get the next n recurrent states.
Called by layers in "incremental" mode.
Args:
n: an integer
Returns:
a list of n Tensors | def get_states(self, n):
return self.states[len(self.new_states):len(self.new_states) + n] | 213,451 |
Compute tokenwise (elementwise) accuracy.
Args:
labels: ground-truth labels, shape=(batch, seq_length)
outputs: predicted tokens, shape=(batch, seq_length)
Returns:
Two ops, one for getting the current average accuracy and another for
updating the running average estimate. | def token_accuracy(labels, outputs):
weights = tf.to_float(tf.not_equal(labels, 0))
return tf.metrics.accuracy(labels, outputs, weights=weights) | 213,454 |
Compute the sequence-level accuracy.
A sequence is only considered correct if all of its entries were predicted
correctly.
Args:
labels: ground-truth labels, shape=(batch, packed_seq_length)
outputs: predicted tokens, shape=(batch, seq_length)
Returns:
Two ops, one for getting the current average accuracy and another for
updating the running average estimate. | def sequence_accuracy(labels, outputs):
# A sequence is correct if all of the non-padded entries are correct
all_correct = tf.reduce_all(
tf.logical_or(tf.equal(labels, outputs), tf.equal(labels, 0)), axis=-1
)
return tf.metrics.mean(all_correct) | 213,455 |
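A worked example of the padding semantics, assuming the TF1-style API used above:

```python
import tensorflow.compat.v1 as tf

labels = tf.constant([[3, 5, 0], [2, 2, 2]])    # 0 marks padding
outputs = tf.constant([[3, 5, 9], [2, 2, 1]])
acc, update_op = sequence_accuracy(labels, outputs)
# After running update_op, acc evaluates to 0.5: the first row counts as
# correct because its only mismatch sits at a padded position.
```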
Initializer.
Args:
graph: either a tf.Graph or mtf.Graph.
canonical_device: optional string, the name of the canonical device for
is_tensor_on_canonical_device. | def __init__(self, graph, canonical_device=None):
self._graph = graph
self.canonical_device = canonical_device
self._operations = self._initialize_operations()
self._operation_name_to_id = self._initialize_operation_name_to_id()
self._tensor_name_to_ids = self._initialize_tensor_name_to_ids()
self._final_tensors = set() | 213,456 |
Generates the names of all input tensors of an operation.
Args:
operation_name: a string, the name of an operation in the graph.
Yields:
a string, the name of an input tensor. | def get_operation_input_names(self, operation_name):
for input_tensor in self._name_to_operation(operation_name).inputs:
yield input_tensor.name | 213,457 |
Generates the names of all output tensors of an operation.
Args:
operation_name: a string, the name of an operation in the graph.
Yields:
a string, the name of an output tensor. | def get_operation_output_names(self, operation_name):
for output_tensor in self._name_to_operation(operation_name).outputs:
yield output_tensor.name | 213,458 |
The tf.TensorShape of a tensor.
Args:
tensor_name: string, the name of a tensor in the graph.
Returns:
a tf.TensorShape | def get_tensor_shape(self, tensor_name):
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, mtf.Tensor):
return tf.TensorShape(tensor.shape.to_integer_list)
else: # tf.Tensor
return tensor.shape | 213,459 |
The number of entries in a tensor.
If partial_layout is specified, then mesh_dimension_to_size must also be. In
this case, the number of entries on a single device is returned.
Args:
tensor_name: a string, name of a tensor in the graph.
partial_layout: an optional {string: string}, from MTF dimension name to
mesh dimension name.
mesh_dimension_to_size: an optional {string: int}, from mesh dimension
name to size.
Returns:
an integer | def get_tensor_num_entries(self, tensor_name, partial_layout=None,
mesh_dimension_to_size=None):
shape = self.get_tensor_shape(tensor_name)
# We don't have to worry about divisibility issues because Mesh TensorFlow
# only allows evenly divisible assignments.
num_entries = 1
for dim in shape.dims:
num_entries = num_entries * dim.value
if not partial_layout:
return num_entries
for mtf_dimension_name in self.get_tensor_mtf_dimension_names(tensor_name):
if mtf_dimension_name not in partial_layout:
continue
mesh_dimension_name = partial_layout[mtf_dimension_name]
mesh_dimension_size = mesh_dimension_to_size[mesh_dimension_name]
num_entries = int(math.ceil(num_entries / mesh_dimension_size))
return num_entries | 213,460 |
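A worked example of the per-device count (dimension and mesh names hypothetical):

```python
import math

# Tensor shape [batch=8, hidden=1024], partial layout {"batch": "rows"},
# mesh {"rows": 4}: only "batch" is split, so each device holds a quarter.
num_entries = 8 * 1024                          # 8192 entries in total
per_device = int(math.ceil(num_entries / 4))    # 2048 entries per device
```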
The size of a tensor in bytes.
If partial_layout is specified, then mesh_dimension_to_size must also be. In
this case, the size on a single device is returned.
Args:
tensor_name: a string, name of a tensor in the graph.
partial_layout: an optional {string: string}, from MTF dimension name to
mesh dimension name.
mesh_dimension_to_size: an optional {string: int}, from mesh dimension
name to size.
Returns:
an integer | def get_tensor_size(self, tensor_name, partial_layout=None,
mesh_dimension_to_size=None):
return (self.get_tensor_dtype(tensor_name).size *
self.get_tensor_num_entries(tensor_name, partial_layout,
mesh_dimension_to_size)) | 213,461 |
The device of a tensor.
Note that only tf tensors have device assignments.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a string or None, representing the device name. | def get_tensor_device(self, tensor_name):
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, tf.Tensor):
return tensor.device
else: # mtf.Tensor
return None | 213,462 |
Whether the tensor is on the first (canonical) device.
Tensors not assigned to a device are assumed to be on all devices, including
the canonical device.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a boolean indicating whether the tensor is on the first device. | def is_tensor_on_canonical_device(self, tensor_name):
device = self.get_tensor_device(tensor_name)
return not device or device == self.canonical_device | 213,463 |
The device of an operation.
Note that only tf operations have device assignments.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a string or None, representing the device name. | def get_operation_device(self, operation_name):
operation = self._name_to_operation(operation_name)
if isinstance(operation, tf.Operation):
return operation.device
else: # mtf.Operation
return None | 213,464 |
The Mesh TensorFlow dimensions associated with a tensor.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a [string], the names of Mesh TensorFlow dimensions. | def get_tensor_mtf_dimension_names(self, tensor_name):
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, mtf.Tensor):
return tensor.shape.dimension_names
else: # tf.Tensor
return [] | 213,465 |
The Mesh TensorFlow dimensions associated with an operation.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a set(string), the names of Mesh TensorFlow dimensions. | def get_operation_mtf_dimension_names(self, operation_name):
mtf_dimension_names = set()
for tensor_name in self.get_operation_input_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
for tensor_name in self.get_operation_output_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
return mtf_dimension_names | 213,466 |
Denotes a tensor as a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph. | def set_tensor_final(self, tensor_name):
tensor = self._name_to_tensor(tensor_name)
self._final_tensors.add(tensor) | 213,467 |
Whether a tensor is a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a boolean indicating whether the tensor was a final output. | def is_tensor_final(self, tensor_name):
tensor = self._name_to_tensor(tensor_name)
return tensor in self._final_tensors | 213,468 |
The tensor with the given name.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a tf.Tensor or mtf.Tensor | def _name_to_tensor(self, tensor_name):
id1, id2 = self._tensor_name_to_ids[tensor_name]
return self._operations[id1].outputs[id2] | 213,474 |
Layer normalization over dimension dim.
Args:
x: a mtf.Tensor whose shape contains dim.
dim: a mtf.Dimension
epsilon: a floating point number
name: a string. variable scope.
Returns:
a mtf.Tensor with same shape as x. | def layer_norm(x, dim, epsilon=1e-6, name="layer_prepostprocess"):
with tf.variable_scope(name + "/layer_norm"):
scale = mtf.get_variable(
x.mesh,
"layer_norm_scale",
mtf.Shape([dim]),
initializer=tf.ones_initializer(),
activation_dtype=x.dtype)
bias = mtf.get_variable(
x.mesh,
"layer_norm_bias",
mtf.Shape([dim]),
initializer=tf.zeros_initializer(),
activation_dtype=x.dtype)
reduced_shape = x.shape - dim
mean = mtf.reduce_mean(x, output_shape=reduced_shape)
variance = mtf.reduce_mean(mtf.square(x - mean), output_shape=reduced_shape)
norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)
return norm_x * scale + bias | 213,477 |
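In equation form, the code above is the standard layer normalization over dim, where gamma is layer_norm_scale, beta is layer_norm_bias, and d is dim.size:

$$\mathrm{LN}(x) = \gamma \odot \frac{x - \mu}{\sqrt{\sigma^2 + \epsilon}} + \beta, \qquad \mu = \frac{1}{d}\sum_{i=1}^{d} x_i, \qquad \sigma^2 = \frac{1}{d}\sum_{i=1}^{d} (x_i - \mu)^2$$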
Batch normalization.
Args:
x: a mtf.Tensor whose shape contains [batch_dim, ..., dim]
is_training: a boolean, whether mode is training.
momentum: a floating point number, specifying batch norm decay value.
epsilon: a floating point number.
init_zero: a boolean, whether to initialize scale with 0's or 1's.
name: a string. variable scope.
Returns:
a mtf.Tensor with same shape as x. | def batch_norm(x, is_training, momentum, epsilon=1e-9,
init_zero=False, name=None):
with tf.variable_scope(name, default_name="batch_norm", values=[x]):
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
norm_dim = x.shape.dims[0:3]
reduced_shape = x.shape - norm_dim
scale = mtf.get_variable(
x.mesh,
"batch_norm_scale",
reduced_shape,
initializer=gamma_initializer,
activation_dtype=x.dtype)
bias = mtf.get_variable(
x.mesh,
"batch_norm_bias",
reduced_shape,
initializer=tf.zeros_initializer(),
activation_dtype=x.dtype)
moving_mean = mtf.get_variable(
x.mesh, "moving_mean", reduced_shape,
initializer=tf.random_normal_initializer(stddev=1.0),
activation_dtype=x.dtype,
trainable=False)
moving_variance = mtf.get_variable(
x.mesh, "moving_variance",
reduced_shape, initializer=tf.ones_initializer(),
activation_dtype=x.dtype,
trainable=False)
# At training time, calculate mean and variance and normalize across batch
# dim.
if is_training:
mean = mtf.reduce_mean(x, output_shape=reduced_shape)
variance = mtf.reduce_mean(
mtf.square(x - mean), output_shape=reduced_shape)
norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)
# Update running mean and running variance.
moving_mean = mtf.assign(
moving_mean, momentum * moving_mean + (1-momentum) * mean)
moving_variance = mtf.assign(
moving_variance,
momentum * moving_variance + (1 - momentum) * variance)
else:
# At eval and test time, use the running mean and variance.
norm_x = (x - moving_mean) * mtf.rsqrt(moving_variance + epsilon)
return (norm_x * scale) + bias | 213,478 |
Sigmoid cross-entropy loss.
Args:
logits: a mtf.Tensor
targets: a mtf.Tensor with the same shape as logits
Returns:
a mtf.Tensor whose shape is equal to logits.shape
Raises:
ValueError: if the shapes do not match. | def sigmoid_cross_entropy_with_logits(logits, targets):
if logits.shape != targets.shape:
raise ValueError(
"logits shape must equal targets shape"
"logits=%s targets=%s" % (logits.to_string, targets.to_string))
x = logits
z = targets
return mtf.relu(x) - x * z + mtf.log(1 + mtf.exp(-mtf.abs(x))) | 213,480 |
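The returned expression is the usual numerically stable form of sigmoid cross-entropy; for x = logits and z = targets it equals the textbook definition:

$$\max(x, 0) - xz + \log\!\left(1 + e^{-|x|}\right) = -z \log \sigma(x) - (1 - z) \log\!\left(1 - \sigma(x)\right)$$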
Hidden layer with ReLU activation followed by linear projection.
The output has the same number of channels as the input.
Args:
x: a mtf.Tensor
hidden_channels: a mtf.Dimension - channels in the hidden layer
dropout: an optional float
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string
Returns:
a mtf.Tensor with the same shape as x. | def dense_relu_dense(x,
hidden_channels,
dropout=0.0,
dropout_broadcast_dims=None,
master_dtype=tf.float32,
slice_dtype=tf.float32, name=None):
with tf.variable_scope(name, default_name="dense_relu_dense"):
io_channels = x.shape.dims[-1]
h = dense(x, hidden_channels,
use_bias=False, activation=mtf.relu,
master_dtype=master_dtype, slice_dtype=slice_dtype, name="wi")
if dropout != 0.0:
h = mtf.dropout(h, 1.0 - dropout,
noise_shape=h.shape - dropout_broadcast_dims)
return dense(h, io_channels, use_bias=False, activation=None,
master_dtype=master_dtype, slice_dtype=slice_dtype,
name="wo") | 213,482 |
Bias for encoder-decoder attention.
Args:
inputs: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., memory_length_dim] | def attention_mask_ignore_padding(inputs, dtype=tf.float32):
inputs = rename_length_to_memory_length(inputs)
return mtf.cast(mtf.equal(inputs, 0), dtype) * -1e9 | 213,494 |
Bias for self-attention where attention to the right is disallowed.
Args:
query_pos: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim] | def attention_mask_autoregressive(query_pos, dtype=tf.float32):
memory_pos = rename_length_to_memory_length(query_pos)
return mtf.cast(mtf.less(query_pos, memory_pos), dtype) * -1e9 | 213,495 |
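A dense NumPy equivalent of the mask semantics (assuming positions 0..n-1 on both axes, as mtf.range would produce):

```python
import numpy as np

n = 4
query_pos = np.arange(n)[:, None]
memory_pos = np.arange(n)[None, :]
bias = (query_pos < memory_pos).astype(np.float32) * -1e9
# bias[i, j] == -1e9 wherever j > i, blocking attention to future positions.
```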
Bias for attention where attention between segments is disallowed.
Args:
query_segment: a mtf.Tensor with shape [..., length_dim]
memory_segment: a mtf.Tensor with shape [..., memory_length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim] | def attention_mask_same_segment(
query_segment, memory_segment=None, dtype=tf.float32):
memory_segment = rename_length_to_memory_length(
memory_segment or query_segment)
return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9 | 213,496 |
Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a mtf.Tensor
epsilon: a floating point value
Returns:
a mtf.Tensor with the same type and shape as x. | def multiplicative_jitter(x, epsilon=1e-2):
if epsilon == 0:
return x
return x * mtf.random_uniform(
x.mesh, x.shape, minval=1.0 - epsilon, maxval=1.0+epsilon, dtype=x.dtype) | 213,499 |
Compress by taking group means.
Args:
x: a Tensor
dim: a dimension in x.shape
compression_factor: an integer
Returns:
a Tensor | def compress_mean(x, dim, compression_factor):
dims = x.shape.dims
pos = dims.index(dim)
compressed_dim = mtf.Dimension(dim.name, dim.size // compression_factor)
compression_factor_dim = mtf.Dimension(
"compression_factor", compression_factor)
new_shape = (
dims[:pos] + [compressed_dim, compression_factor_dim] + dims[pos + 1:])
x = mtf.reshape(x, new_shape)
x = mtf.reduce_mean(x, reduced_dim=compression_factor_dim)
return x | 213,501 |
Generate a dictionary of metric name to metric function.
Args:
metric_names: list of strings in the format "prefix/metric_function_name".
metric_function_name should refer to a function name in metrics.py. The
prefix will be included in the key in the returned dict.
labels: a tensor where batch is the first dimension.
outputs: a tensor of model predictions, same dimensionality as labels.
Returns:
metric_fns: dict of metric functions keyed by their name. | def get_metric_fns(metric_names, labels, outputs):
metric_fns = {}
for metric_name in metric_names:
metric_fn_name = metric_name.split("/")[-1]
if hasattr(metrics, metric_fn_name):
metric_fn = getattr(metrics, metric_fn_name)
metric_fns[metric_name] = metric_fn(labels, outputs)
else:
raise ValueError("Metric {} is not implemented".format(metric_fn_name))
return metric_fns | 213,505 |
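A hypothetical call, with names resolving to functions in metrics.py such as the token_accuracy and sequence_accuracy shown earlier; labels and outputs are tensors produced by the model:

```python
metric_fns = get_metric_fns(
    ["eval/token_accuracy", "eval/sequence_accuracy"], labels, outputs)
# -> {"eval/token_accuracy": (value_op, update_op), ...}
```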
Computes a schedule to minimize peak memory.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
scheduler_alg: a string, one of 'NAIVE' or 'LIST'
Returns:
an iterable of integers representing the schedule. | def minimize_peak_memory(graph, scheduler_alg):
if scheduler_alg == 'NAIVE':
return _minimize_peak_memory_naive(graph)
elif scheduler_alg == 'LIST':
return _minimize_peak_memory_list(graph)
else:
raise NotImplementedError('{} is not a scheduler algorithm. It should be '
'one of NAIVE or LIST.'
.format(scheduler_alg)) | 213,506 |
Computes schedule according to the greedy list heuristic.
Greedy list heuristic: schedule the operation which results in the most bytes
of memory being (immediately) freed.
TODO(joshuawang): Experiment with tiebreaking by preferring more successors.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
Returns:
an iterable of integers representing the schedule. | def _minimize_peak_memory_list(graph):
schedule = []
bytes_freed = {} # {operation_name: bytes freed}
users_of = collections.defaultdict(set) # {tensor_name: set(operation_name)}
in_degree = collections.defaultdict(int) # {operation_name: in degree}
operation_id = {} # {operation_name: id}
# We want an updatable priority queue, so we use the following workaround:
# docs.python.org/2/library/heapq.html#priority-queue-implementation-notes
priority_queue = [] # (negative bytes freed, operation name)
# Set up the (greedy) topological sort.
for i, operation_name in enumerate(graph.get_all_operation_names()):
operation_id[operation_name] = i
for input_name in graph.get_operation_input_names(operation_name):
# Note that in _HybridGraphInterface, an operation may use a tensor twice,
# but we deduplicate (with respect to in_degree) so that we can later use
# users_of to decrement in_degree.
if operation_name in users_of[input_name]:
continue
users_of[input_name].add(operation_name)
in_degree[operation_name] += 1
for operation_name in graph.get_all_operation_names():
bytes_freed[operation_name] = 0
# For each input, this operation frees memory if it is the final consumer.
for input_name in graph.get_operation_input_names(operation_name):
if len(users_of[input_name]) == 1 and not graph.is_tensor_final(
input_name):
bytes_freed[operation_name] += graph.get_tensor_size(input_name)
# For each output, this operation will require additional bytes of memory
# (hence negative bytes freed).
for output_name in graph.get_operation_output_names(operation_name):
# If the output is used (or is final), then it eats memory.
if users_of[output_name] or graph.is_tensor_final(output_name):
bytes_freed[operation_name] -= graph.get_tensor_size(output_name)
for operation_name in graph.get_all_operation_names():
if in_degree[operation_name] == 0:
heapq.heappush(priority_queue,
(-bytes_freed[operation_name], operation_name))
# Do the (greedy) topological sort.
while priority_queue:
neg_bytes_freed, operation_name = heapq.heappop(priority_queue)
if bytes_freed[operation_name] != -neg_bytes_freed:
continue
schedule.append(operation_id[operation_name])
bytes_freed[operation_name] = None
for output_name in graph.get_operation_output_names(operation_name):
for other_operation_name in users_of[output_name]:
in_degree[other_operation_name] -= 1
if in_degree[other_operation_name] == 0:
heapq.heappush(priority_queue,
(-bytes_freed[other_operation_name],
other_operation_name))
for input_name in graph.get_operation_input_names(operation_name):
if operation_name not in users_of[input_name]:
# Used twice by this operation and hence already removed.
continue
users_of[input_name].remove(operation_name)
if len(users_of[input_name]) != 1 or graph.is_tensor_final(input_name):
continue
(other_operation_name,) = users_of[input_name]
bytes_freed[other_operation_name] += graph.get_tensor_size(
input_name)
if in_degree[other_operation_name] > 0:
continue
# Push another copy into the priority queue with our updated value.
# The original copy will be ignored since it does not match bytes_freed.
heapq.heappush(priority_queue, (-bytes_freed[other_operation_name],
other_operation_name))
return schedule | 213,507 |
Compute layout rules based on a computational graph and mesh shape.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
Returns:
a mtf.LayoutRules | def layout(mtf_graph, mesh_shape, mtf_outputs=()):
mesh_shape = mtf.convert_to_shape(mesh_shape)
estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape,
mtf_outputs)
optimizer = layout_optimizer.LayoutOptimizer(estimator)
return mtf.convert_to_layout_rules(optimizer.solve()) | 213,508 |
Second-moment decay rate like Adam, subsuming the correction factor.
Args:
beta2: a float between 0 and 1
Returns:
a scalar | def adafactor_decay_rate_adam(beta2):
t = tf.cast(tf.train.get_or_create_global_step(), tf.float32) + 1.0
decay = beta2 * (1.0 - tf.pow(beta2, t - 1.0)) / (1.0 - tf.pow(beta2, t))
return decay | 213,510 |
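This folds Adam's bias correction into a time-dependent decay rate, as in the Adafactor paper:

$$\hat{\beta}_{2,t} = \beta_2 \, \frac{1 - \beta_2^{\,t-1}}{1 - \beta_2^{\,t}}$$

so an uncorrected running average computed with rate beta2-hat matches Adam's bias-corrected second-moment estimate.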
Create an Adafactor optimizer based on model hparams.
Args:
hparams: model hyperparameters
lr: learning rate scalar.
Returns:
an AdafactorOptimizer
Raises:
ValueError: on illegal values | def adafactor_optimizer_from_hparams(hparams, lr):
if hparams.optimizer_adafactor_decay_type == "Adam":
decay_rate = adafactor_decay_rate_adam(
hparams.optimizer_adafactor_beta2)
elif hparams.optimizer_adafactor_decay_type == "pow":
decay_rate = adafactor_decay_rate_pow(
hparams.optimizer_adafactor_memory_exponent)
else:
raise ValueError("unknown optimizer_adafactor_decay_type")
return AdafactorOptimizer(
multiply_by_parameter_scale=(
hparams.optimizer_adafactor_multiply_by_parameter_scale),
learning_rate=lr,
decay_rate=decay_rate,
beta1=hparams.optimizer_adafactor_beta1,
clipping_threshold=hparams.optimizer_adafactor_clipping_threshold,
factored=hparams.optimizer_adafactor_factored) | 213,511 |
Apply gradients to variables.
Call this function externally instead of apply_grad(). This causes the
operations to be combined, which is necessary for stacking variables
see mtf.rewrite_stack_variables().
Args:
grads: a list of Tensor
variables: a list of Variables
Returns:
a list of Operations | def apply_grads(self, grads, variables):
ops = []
for grad, var in zip(grads, variables):
ops.extend(self.apply_grad(grad, var))
if not ops:
return ops
return variables[0].graph.combine_assignments(ops) | 213,512 |
Should we use a factored second moment estimator.
Based on the shape of the variable.
If we factor the accumulator, then this function returns a list of two
mtf.Dimensions to reduce over. We always pick the two largest dimensions.
If there are not two dimensions of size >= min_dim_size_to_factor, then we
do not factor.
Args:
shape: a Shape
Returns:
either a list of 2 Dimensions or None | def _factored_dims(self, shape):
if not self._factored or shape.ndims < 2:
return None
sorted_dims = sorted(shape.dims, key=lambda d: -d.size)
if sorted_dims[1].size < self._min_dim_size_to_factor:
return None
return sorted_dims[:2] | 213,515 |
Initializer.
Args:
mtf_graph: an mtf.Graph, representing the Mesh TensorFlow computation of
interest.
mesh_shape: an mtf.Shape, representing the mesh of interest. | def __init__(self, mtf_graph, mesh_shape):
self._splittable_mtf_dimension_names = self._initialize_splittable_dimensions(
mtf_graph)
self._mtf_dimension_name_to_size_gcd = (
self._initialize_mtf_dimension_name_to_size_gcd(mtf_graph))
self._mesh_dimension_name_to_size = self._initialize_mesh_dimension_name_to_size(
mesh_shape) | 213,517 |
Whether this MTF dimension may be assigned to this mesh dimension.
Args:
mtf_dimension_name: string, the name of a Mesh TensorFlow dimension.
mesh_dimension_name: string, the name of a mesh dimension.
Returns:
A boolean indicating whether the assignment is valid. | def is_valid_assignment(self, mtf_dimension_name, mesh_dimension_name):
return ((mtf_dimension_name in self._splittable_mtf_dimension_names) and
(self._mtf_dimension_name_to_size_gcd[mtf_dimension_name] %
self._mesh_dimension_name_to_size[mesh_dimension_name] == 0)) | 213,518 |
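A worked example of the GCD test (sizes hypothetical):

```python
import math

# "batch" occurs with sizes 64 and 96 somewhere in the graph.
gcd = math.gcd(64, 96)    # 32: every occurrence is divisible by 32
assert gcd % 8 == 0       # a mesh dimension of size 8 is a valid assignment
assert gcd % 12 != 0      # size 12 would split some occurrence unevenly
```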
Initializer for self._splittable_mtf_dimension_names.
Args:
mtf_graph: an mtf.Graph.
Returns:
A set(string) of the names of Mesh TensorFlow dimensions that may be
assigned in a layout. | def _initialize_splittable_dimensions(self, mtf_graph):
all_mtf_dimension_names = set() # set(string)
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
if not re.match(r"_anonymous_\d*", mtf_dimension.name):
all_mtf_dimension_names.add(mtf_dimension.name)
unsplittable_mtf_dimension_names = set() # set(string)
for mtf_operation in mtf_graph.operations:
unsplittable_mtf_dimension_names.update(mtf_operation.unsplittable_dims)
return all_mtf_dimension_names - unsplittable_mtf_dimension_names | 213,519 |
Initializer for self._mtf_dimension_name_to_size_gcd.
Args:
mtf_graph: an mtf.Graph.
Returns:
A {string: int}, mapping the name of an MTF dimension to the greatest
common divisor of all the sizes it has. All these sizes being evenly
divisible by some x is equivalent to the GCD being divisible by x. | def _initialize_mtf_dimension_name_to_size_gcd(self, mtf_graph):
mtf_dimension_name_to_size_gcd = {}
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
mtf_dimension_name_to_size_gcd[mtf_dimension.name] = fractions.gcd(
mtf_dimension_name_to_size_gcd.get(mtf_dimension.name,
mtf_dimension.size),
mtf_dimension.size)
return mtf_dimension_name_to_size_gcd | 213,520 |
Initializer for self._mesh_dimension_name_to_size.
Args:
mesh_shape: an mtf.Shape.
Returns:
A {string: int} mapping mesh dimension names to their sizes. | def _initialize_mesh_dimension_name_to_size(self, mesh_shape):
mesh_dimension_name_to_size = {} # {string: int}
for mesh_dimension in mesh_shape.dims:
mesh_dimension_name_to_size[mesh_dimension.name] = mesh_dimension.size
return mesh_dimension_name_to_size | 213,521 |
Compute the reduction of all Tensors and put the result everywhere.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of strings
reduction_fn_string: "SUM" or "MAX"
Returns:
a list of n Tensors
Raises:
ValueError: if devices is not a list of n strings | def allreduce_ring_single_shard(xs, devices, reduction_fn_string="SUM"):
n = len(xs)
binary_reduction = mtf.binary_reduction_fn(reduction_fn_string)
assert len(devices) == n, "devices must be a list of length len(xs)"
if n == 1:
return xs
result = [None] * n
if n % 2 == 0:
left_center = n // 2 - 1
right_center = left_center + 1
else:
left_center = n // 2
right_center = left_center
left_sum = xs[0]
for i in xrange(1, left_center + 1):
with tf.device(devices[i]):
left_sum = binary_reduction(left_sum, xs[i])
right_sum = xs[n-1]
for i in reversed(xrange(left_center + 1, n - 1)):
with tf.device(devices[i]):
right_sum = binary_reduction(xs[i], right_sum)
with tf.device(devices[left_center]):
result[left_center] = binary_reduction(left_sum, right_sum)
if n % 2 == 0:
with tf.device(devices[right_center]):
result[right_center] = binary_reduction(left_sum, right_sum)
for i in reversed(xrange(left_center)):
with tf.device(devices[i]):
result[i] = tf.identity(result[i + 1])
for i in xrange(right_center + 1, n):
with tf.device(devices[i]):
result[i] = tf.identity(result[i - 1])
return result | 213,523 |
Compute the reduction of all Tensors and put the result everywhere.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of strings
reduction_fn_string: "SUM" or "MAX"
Returns:
a list of n Tensors
Raises:
ValueError: if devices is not a list of n strings | def allreduce_ring(xs, devices, reduction_fn_string="SUM"):
n = len(xs)
if len(devices) != n:
raise ValueError("devices must be a list of length len(xs)")
if n == 1:
return xs
shape = xs[0].shape.as_list()
# tf.logging.info("allreduce_ring shape = %s" % shape)
size = None if None in shape else mtf.list_product(shape)
if size is None or size < 1024 or size % n != 0:
return allreduce_ring_single_shard(xs, devices, reduction_fn_string)
def _circular_shift(l, n):
n %= len(l)
return l[-n:] + l[:-n]
def _flatten_and_split(x):
# tf.reshape treats [-1] as a special value denoting 1D flattening.
return tf.split(tf.reshape(x, [-1]), n)
def _concat_and_reshape(xs):
return tf.reshape(tf.concat(xs, 0), shape)
# [device, shard]
x_split = mtf.parallel(devices, _flatten_and_split, xs)
x_split_t = mtf.transpose_list_of_lists(x_split)
y_split_t = []
for shard in xrange(n):
shard_xs = _circular_shift(x_split_t[shard], shard)
shard_devices = _circular_shift(devices, shard)
shard_ys = allreduce_ring_single_shard(
shard_xs, shard_devices, reduction_fn_string)
y_split_t.append(_circular_shift(shard_ys, -shard))
y_split = mtf.transpose_list_of_lists(y_split_t)
ys = mtf.parallel(devices, _concat_and_reshape, y_split)
return ys | 213,524 |
Concatenate all Tensors everywhere.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of n strings
concat_axis: an integer
Returns:
a list of n Tensors | def allconcat_ring(xs, devices, concat_axis):
n = len(xs)
if n == 1:
return xs
# [target, source]
parts = [[xs[target] if target == source else None for source in xrange(n)]
for target in xrange(n)]
for distance in xrange(1, n // 2 + 1):
for target in xrange(n):
source = (target + distance) % n
if parts[target][source] is None:
with tf.device(devices[target]):
parts[target][source] = tf.identity(parts[(target + 1) % n][source])
source = (target - distance) % n
if parts[target][source] is None:
with tf.device(devices[target]):
parts[target][source] = tf.identity(parts[(target - 1) % n][source])
return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n) | 213,525 |
MPI alltoall operation.
Implementation of alltoall using pointwise communication.
Args:
xs: a list of n tf.Tensors
devices: a list of n strings
split_axis: an integer
concat_axis: an integer
Returns:
a list of n Tensors | def alltoall_pointtwise(xs, devices, split_axis, concat_axis):
n = len(xs)
if n == 1:
return xs
# [target, source]
parts = mtf.transpose_list_of_lists(
mtf.parallel(devices, tf.split, xs, [n] * n, axis=[split_axis] * n))
return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n) | 213,526 |
MPI alltoall operation.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of n strings
split_axis: an integer
concat_axis: an integer
Returns:
a list of n Tensors | def alltoall_ring(xs, devices, split_axis, concat_axis):
n = len(xs)
if n == 1:
return xs
# set up
# [target, source]
parts = [[None] * n for i in xrange(n)]
def my_split(x, size_splits):
total_size = tf.shape(x)[split_axis]
part_size = total_size // sum(size_splits)
return tf.split(x, [s * part_size for s in size_splits], axis=split_axis)
forward_message_size = (n - 1) // 2
backward_message_size = (n - 1) - forward_message_size
forward_messages = [None] * n
backward_messages = [None] * n
for i in xrange(n):
with tf.device(devices[i]):
if i >= backward_message_size:
a, b, c, d = my_split(
xs[i], [i - backward_message_size,
backward_message_size, 1, n - i - 1])
backward_messages[i] = b
parts[i][i] = c
forward_messages[i] = tf.concat([d, a], axis=split_axis)
else:
a, b, c, d = my_split(
xs[i], [i, 1, forward_message_size, backward_message_size - i])
backward_messages[i] = tf.concat([d, a], axis=split_axis)
parts[i][i] = b
forward_messages[i] = c
for step in xrange(1, max(forward_message_size, backward_message_size) + 1):
new_forward_messages = [None] * n
new_backward_messages = [None] * n
for i in xrange(n):
with tf.device(devices[i]):
if forward_message_size > 0:
parts[i][(i - step) % n], new_forward_messages[i] = my_split(
forward_messages[(i - 1) % n], [1, forward_message_size - 1])
if backward_message_size > 0:
new_backward_messages[i], parts[i][(i + step) % n] = my_split(
backward_messages[(i + 1) % n], [backward_message_size - 1, 1])
forward_message_size -= 1
backward_message_size -= 1
forward_messages = new_forward_messages
backward_messages = new_backward_messages
return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n) | 213,527 |
Execute a function in parallel on all slices.
Args:
fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.
*inputs: a list of inputs. Each input is either a LaidOutTensor or
is convertible to a tf.Tensor.
Returns:
a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple. | def slicewise(self, fn, *inputs):
if fn == tf.add:
assert len(inputs) == 2
if isinstance(inputs[0], mtf.LazyAllreduceSum):
# sum of LazyAllreduceSum (keep delaying the allreduce)
return inputs[0] + inputs[1]
# convert all inputs to LaidOutTensor where possible
inputs = mtf.convert_args_to_laid_out_tensors(inputs)
inputs = [x.tensor_list if isinstance(x, self.LaidOutTensor)
else [x] * len(self.devices) for x in inputs]
ret = mtf.parallel(self.devices, fn, *inputs)
if isinstance(ret[0], tuple):
ret = mtf.transpose_list_of_lists(ret)
return tuple([self.LaidOutTensor(t) for t in ret])
else:
return self.LaidOutTensor(ret) | 213,529 |
Call tf.Print.
Args:
x: a LaidOutTensor
data: a list of LaidOutTensor
message: a string
**kwargs: keyword arguments to tf.Print
Returns:
a LaidOutTensor | def Print(self, x, data, message, **kwargs): # pylint: disable=invalid-name
tf.logging.info("PlacementMeshImpl::Print")
new_slices = x.tensor_list[:]
with tf.device(self._devices[0]):
new_slices[0] = tf.Print(
new_slices[0], [t for d in data for t in d.tensor_list],
message, **kwargs)
return self.LaidOutTensor(new_slices) | 213,530 |
Grouped allreduce, (across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers - the mesh dimensions to be reduced
reduction_fn_string: "SUM" or "MAX"
Returns:
a LaidOutTensor | def allreduce(self, x, mesh_axes, reduction_fn_string):
return self._collective_with_groups(
x, mesh_axes, functools.partial(
allreduce_ring, reduction_fn_string=reduction_fn_string)) | 213,531 |
Grouped allconcat (like MPI allgather followed by concat).
Args:
x: a LaidOutTensor
mesh_axis: an integer - the mesh axis along which to group
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor | def allconcat(self, x, mesh_axis, concat_axis):
return self._collective_with_groups(
x, [mesh_axis],
functools.partial(allconcat_ring, concat_axis=concat_axis)) | 213,532 |
Grouped alltoall.
Args:
x: a LaidOutTensor
mesh_axis: an integer the mesh axis along which to group
split_axis: an integer (the Tensor axis along which to split)
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor | def alltoall(self, x, mesh_axis, split_axis, concat_axis):
return self._collective_with_groups(
x, [mesh_axis],
functools.partial(
alltoall_ring, split_axis=split_axis, concat_axis=concat_axis)) | 213,533 |
Grouped collective, (across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers - the mesh dimensions to be reduced
collective: fn from list(tf.Tensor), list(device) -> list(tf.Tensor)
Returns:
a LaidOutTensor | def _collective_with_groups(self, x, mesh_axes, collective):
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if len(mesh_axes) == self.ndims:
return self.LaidOutTensor(collective(x.tensor_list, self._devices))
else:
groups = mtf.processor_groups(self.shape, mesh_axes)
ret = [None] * self.size
for g in groups:
inputs = [x.tensor_list[pnum] for pnum in g]
devices = [self._devices[pnum] for pnum in g]
reduced = collective(inputs, devices)
for pnum, y in zip(g, reduced):
ret[pnum] = y
return self.LaidOutTensor(ret) | 213,535 |
Call a random tf operation (e.g. tf.random.uniform).
Args:
shape: a Shape
tf_fn: a function such as tf.random.uniform
kwargs: kwargs to pass to tf_fn, except for seed
Returns:
a LaidOutTensor | def random(self, shape, tf_fn, kwargs):
slice_shape = self.slice_shape(shape)
op_seed = random.random()
def my_fn(pnum):
# seeds are necessary to make sure that slices that should have the
# same values actually do have the same values.
seed = hash("%s,%s" % (op_seed, self.slice_begin(shape, pnum)))
return tf_fn(slice_shape, seed=seed, **kwargs)
return self.slicewise(my_fn, self.laid_out_pnum()) | 213,536 |
Turn a Tensor into a tf.Tensor.
Args:
x: a Tensor
laid_out_x: a LaidOutTensor
Returns:
a tf.Tensor | def export_to_tf_tensor(self, x, laid_out_x):
return self.combine_slices(laid_out_x.all_slices, x.shape) | 213,537 |
Import a tf.Tensor, producing a LaidOutTensor.
Args:
x: a Tensor
tf_x: a tf.Tensor
Returns:
a LaidOutTensor | def import_tf_tensor(self, x, tf_x):
return self.LaidOutTensor(self.make_slices(tf_x, x.shape)) | 213,538 |
Common case attention parameters.
Args:
mesh: a Mesh
io_dim: a Dimension (channels dimension of inputs and outputs)
kv_dim: a Dimension (channels in keys and values)
heads_dim: a Dimension (number of attention "heads")
variable_dtype: a mtf.VariableDType
Returns:
an AttentionParams | def attention_params_simple(
mesh, io_dim, kv_dim, heads_dim, variable_dtype):
return AttentionParams(
mesh,
query_input_dim=io_dim,
memory_input_dim=io_dim,
output_dim=io_dim,
key_dim=kv_dim,
value_dim=kv_dim,
query_heads_dims=[heads_dim],
memory_heads_dims=[heads_dim],
variable_dtype=variable_dtype) | 213,541 |
Compute query Tensor q.
Args:
query_antecedent: a Tensor with dimensions
{query_input_dim} + other_dims
Returns:
a Tensor with dimensions
query_heads_dims + {key_dim} + other_dims | def compute_q(self, query_antecedent):
ret = mtf.einsum(
[query_antecedent, self.wq], reduced_dims=[self.query_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.q_dims)
return ret | 213,544 |
Compute key/value Tensor kv.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {key_dim} + other_dims | def compute_kv(self, memory_antecedent):
if not self.shared_kv:
raise ValueError("compute_kv can only be called with shared_kv")
ret = mtf.einsum(
[memory_antecedent, self.wkv], reduced_dims=[self.memory_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.k_dims)
return ret | 213,545 |
Compute key Tensor k.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {key_dim} + other_dims | def compute_k(self, memory_antecedent):
if self.shared_kv:
raise ValueError("compute_k cannot be called with shared_kv")
ret = mtf.einsum(
[memory_antecedent, self.wk], reduced_dims=[self.memory_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.k_dims)
return ret | 213,546 |