Columns: body_hash | path | name | repository_name | repository_stars | lang, followed by the function body (docstring included).
---|---|---|---|---|---
824d26f49c186501d879d3395043cf0e091ac50813e242638197dd4e405dd8a3 | tests/join_test.py | test_join_on_eq_with_pos_dt_outside_window | mcdobe100/arkouda | 0 | python

def test_join_on_eq_with_pos_dt_outside_window(self):
    """Should get 0 answers because N matches but 0 within dt window."""
    dt = 8
    I, J = ak.join_on_eq_with_dt(self.a2, self.a1, self.t1, self.t2, dt, 'pos_dt')
    self.assertEqual(0, I.size)
    self.assertEqual(0, J.size)
    dt = np.int64(8)
    I, J = ak.join_on_eq_with_dt(self.a2, self.a1, self.t1, self.t2, dt, 'pos_dt')
    self.assertEqual(0, I.size)
    self.assertEqual(0, J.size)
    I, J = ak.join_on_eq_with_dt(self.a2, self.a1, self.t1, self.t2, dt, 'pos_dt', int(0))
    self.assertEqual(0, I.size)
    self.assertEqual(0, J.size)
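For context, a hedged sketch of the fixture such a test assumes. The array names, sizes, and values below are illustrative guesses, not the repository's actual setUp:

# Illustrative fixture only -- connection boilerplate and values are
# assumptions, not mcdobe100/arkouda's actual test setup.
import numpy as np
import arkouda as ak

class JoinTest(ArkoudaTest):                    # assumed base test class
    def setUp(self):
        ArkoudaTest.setUp(self)
        self.a1 = ak.ones(10, dtype=ak.int64)   # join keys, left side
        self.a2 = ak.arange(10)                 # join keys, right side
        self.t1 = ak.arange(10)                 # left timestamps
        self.t2 = self.t1 + 10                  # every gap exceeds dt = 8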
672a15693f7febbcb8be0f7993750a267acc86e7d8dc944d8ec740a5b076acc1 | tests/join_test.py | test_error_handling | mcdobe100/arkouda | 0 | python

def test_error_handling(self):
    """Tests TypeError and ValueError handling."""
    with self.assertRaises(TypeError):
        ak.join_on_eq_with_dt([list(range(0, 11))], self.a1, self.t1, self.t2, 8, 'pos_dt')
    with self.assertRaises(TypeError):
        ak.join_on_eq_with_dt([self.a1, list(range(0, 11))], self.t1, self.t2, 8, 'pos_dt')
    with self.assertRaises(TypeError):
        ak.join_on_eq_with_dt([self.a1, self.a1, list(range(0, 11))], self.t2, 8, 'pos_dt')
    with self.assertRaises(TypeError):
        ak.join_on_eq_with_dt([self.a1, self.a1, self.t1, list(range(0, 11))], 8, 'pos_dt')
    with self.assertRaises(TypeError):
        ak.join_on_eq_with_dt(self.a1, self.a1, self.t1, self.t2, '8', 'pos_dt')
    with self.assertRaises(ValueError):
        ak.join_on_eq_with_dt(self.a1, self.a1, self.t1, self.t1 * 10, 8, 'ab_dt')
    with self.assertRaises(ValueError):
        ak.join_on_eq_with_dt(self.a1, self.a1, self.t1, self.t1 * 10, 8, 'abs_dt', -1)
1199d277d3d6d89c30dec59411d5eede6d96d499cf37956440694d58182d224c | wavenet/model.py | train | wusq121/wavenet | 2 | python

def train(self, inputs, targets):
    """
    :param inputs: Tensor[batch, channels, timestep]
    :param targets: Tensor[batch, channels, timestep]
    """
    outputs = self.net(inputs)
    loss = self.loss(outputs.view(self.in_channels, -1).transpose(0, 1),
                     targets.long().view(-1))
    self.optimizer.zero_grad()
    loss.backward()
    self.optimizer.step()
    return loss.item()  # loss.data[0] in the original; that indexing raises on PyTorch >= 0.5
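A hedged sketch of how this step might be driven. The constructor, its arguments, and the data loader are assumptions about the surrounding repository, not code taken from it:

# Assumed surrounding code; only train() above comes from the repository.
model = WaveNet(layer_size=10, stack_size=5,
                in_channels=256, res_channels=512)   # hypothetical constructor
for epoch in range(10):
    for inputs, targets in data_loader:   # Tensor[batch, channels, timestep]
        loss = model.train(inputs, targets)
        print('epoch %d, loss %.4f' % (epoch, loss))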
1245eae19cfe365babd2e4919c0f1c51fbc87c9c97e218f254edd19d9993f328 | app/home/views.py | logout | summerliu1024/flask_movie | 1 | python

@home.route('/logout')
def logout():
    """On logout, redirect to the login page."""
    return redirect(url_for('home.login'))
ee72416e894d0c9209830f1b47a9d4f4477267ce70470c96a75fa6268198a48a | src/versioningit/basics.py | basic_tag2version | jenshnielsen/versioningit | 17 | python

def basic_tag2version(*, tag: str, **kwargs: Any) -> str:
    """Implements the ``"basic"`` ``tag2version`` method"""
    try:
        rmprefix = str_guard(kwargs.pop('rmprefix'), 'tool.versioningit.tag2version.rmprefix')
    except KeyError:
        pass
    else:
        tag = strip_prefix(tag, rmprefix)
    try:
        rmsuffix = str_guard(kwargs.pop('rmsuffix'), 'tool.versioningit.tag2version.rmsuffix')
    except KeyError:
        pass
    else:
        tag = strip_suffix(tag, rmsuffix)
    require_match = bool(kwargs.pop('require-match', False))
    try:
        regex = str_guard(kwargs.pop('regex'), 'tool.versioningit.tag2version.regex')
    except KeyError:
        pass
    else:
        m = re.search(regex, tag)
        if m is None:
            if require_match:
                raise InvalidTagError(f'tag2version.regex did not match tag {tag!r}')
            else:
                log.info('tag2version.regex did not match tag %r; leaving unmodified', tag)
        else:
            if 'version' in m.groupdict():
                tag = m['version']
            else:
                tag = m[0]
            if tag is None:
                raise InvalidTagError("'version' group in tool.versioningit.tag2version.regex did not participate in match")
    warn_extra_fields(kwargs, 'tool.versioningit.tag2version',
                      ['rmprefix', 'rmsuffix', 'regex', 'require-match'])
    return tag.lstrip('v')
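Two illustrative calls (not from the repository's test suite), exercising the prefix-stripping and regex paths of the function above:

# Illustrative only; outputs follow from the function above.
print(basic_tag2version(tag='release/v1.2.3', rmprefix='release/'))
# -> 1.2.3
print(basic_tag2version(tag='v1.2.3-final',
                        regex=r'(?P<version>\d+\.\d+\.\d+)'))
# -> 1.2.3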
c45cecc7853bda2c0a6dd0955d9bda2d978213647c8beae0d3c64adcfb1dd8c0 | src/versioningit/basics.py | basic_format | jenshnielsen/versioningit | 17 | python

def basic_format(*, description: VCSDescription, version: str, next_version: str, **kwargs: Any) -> str:
    """Implements the ``"basic"`` ``format`` method"""
    branch: Optional[str]
    if description.branch is not None:
        branch = re.sub('[^A-Za-z0-9.]', '.', description.branch)
    else:
        branch = None
    fields = {**description.fields, 'branch': branch, 'version': version, 'next_version': next_version}
    formats = {**DEFAULT_FORMATS, **kwargs}
    try:
        fmt = formats[description.state]
    except KeyError:
        raise ConfigError(f'No format string for {description.state!r} state found in tool.versioningit.format')
    return fmt.format_map(fields)
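A hedged example of overriding the format string for the ``distance`` state; the ``VCSDescription`` constructor arguments are assumed from how the dataclass is used above:

# Constructor arguments are assumptions inferred from the function above.
desc = VCSDescription(
    tag='v0.1.0',
    state='distance',
    branch='feature/x',
    fields={'distance': 3, 'rev': 'abc1234'},
)
print(basic_format(
    description=desc,
    version='0.1.0',
    next_version='0.2.0',
    **{'distance': '{next_version}.dev{distance}+{rev}'},
))
# -> 0.2.0.dev3+abc1234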
82bb5f371b8c337dbabe1a9f0dff1b6b5eaa1e60ab7196b33b85d814a5c3f0b8 | src/versioningit/basics.py | basic_write | jenshnielsen/versioningit | 17 | python

def basic_write(*, project_dir: Union[str, Path], version: str, **kwargs: Any) -> None:
    """Implements the ``"basic"`` ``write`` method"""
    try:
        filename = str_guard(kwargs.pop('file'), 'tool.versioningit.write.file')
    except KeyError:
        log.debug("No 'file' field in tool.versioningit.write; not writing anything")
        return
    path = Path(project_dir, filename)
    encoding = str_guard(kwargs.pop('encoding', 'utf-8'), 'tool.versioningit.write.encoding')
    try:
        template = str_guard(kwargs.pop('template'), 'tool.versioningit.write.template')
    except KeyError:
        if path.suffix == '.py':
            template = '__version__ = "{version}"'
        elif path.suffix == '.txt' or path.suffix == '':
            template = '{version}'
        else:
            raise ConfigError(f'tool.versioningit.write.template not specified and file has unknown suffix {path.suffix!r}')
    warn_extra_fields(kwargs, 'tool.versioningit.write', ['file', 'encoding', 'template'])
    log.debug('Ensuring parent directories of %s exist', path)
    path.parent.mkdir(parents=True, exist_ok=True)
    log.info('Writing version %s to file %s', version, path)
    path.write_text(template.format(version=version) + '\n', encoding=encoding)
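A hedged usage sketch; the project directory and relative file path are hypothetical:

# Hypothetical paths; the .py suffix selects the default template above.
basic_write(
    project_dir='/tmp/myproj',
    version='1.2.3',
    **{'file': 'src/mypkg/_version.py'},
)
# Writes: __version__ = "1.2.3"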
f01d0ce7ea7861336c21c2f5aab0c180a42f65bea002af57191df8ef6defbc84 | makahiki/apps/widgets/bonus_points/models.py | generate_bonus_points | justinslee/Wai-Not-Makahiki | 1 | python

@staticmethod
def generate_bonus_points(point_value, num_codes):
    """Generates a set of random codes for the bonus points with the given
    point value."""
    values = 'EXAMPLE_KEY'
    header = 'BONUS'
    header += '-'
    header += str(point_value)
    header += '-'
    for _ in range(0, num_codes):
        bonus = BonusPoint(point_value=point_value, code=header.lower(),
                           create_date=datetime.datetime.now())
        valid = False
        while not valid:
            for value in random.sample(values, 5):
                bonus.code += value
            try:
                bonus.save()
                valid = True
            except IntegrityError:
                bonus.code = header
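A hedged usage sketch of the staticmethod above; the call site is assumed (e.g. a Django management command):

# Mint 100 single-use codes worth 25 points each; on a duplicate code the
# method above retries via its IntegrityError branch.
BonusPoint.generate_bonus_points(point_value=25, num_codes=100)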
be187069a0543c9c45905978fb8dbf029f118dbfaf82fd130251ca2d3e82b30b | idris_python/cli.py | idris_python | thautwarm/idris-python | 30 | python

@wise
def idris_python(main_file_or_project_entry: str, packages: str='cam',
                 idris: 'idris executable path'='idris',
                 o: 'output .cam file'='<nocam>'):
    """
    You can specify multiple packages by
    idris-python --packages "cam base effect"
    """
    packages = (e.strip() for e in packages.split(' '))
    out_cam = o != '<nocam>'
    if not out_cam:
        o = tempfile.mkstemp(suffix='.cam')[1]
    p = Path(main_file_or_project_entry)
    if p.suffix == '.idr':
        ins = [str(p.absolute())]
    else:
        p = p.absolute()
        with p.open('r') as f:
            config = toml.load(f)
        config = config['idris-cam']
        assert config.get('backend', 'python') == 'python', 'The backend is specified'
        modules = config['modules']
        p: Path = p.parent
        ins = []
        for m in modules:
            ins.append(str(p.joinpath('src', *m.split('.'))))
    proc = Popen([idris, '--codegen', 'cam', *ins, '-o', o, '-p', *packages],
                 stdout=PIPE, stderr=PIPE)
    stdout, stderr = proc.communicate(timeout=30)
    stdout = stdout.decode()
    if stdout:
        print(stdout)
    if proc.returncode != 0:  # "is not 0" in the original relies on CPython int caching
        print(stderr.decode())
        return 1
    if not out_cam:
        common_abstract_machine_python_loader([o])
    return 0
91b48d7f86a98a9d2314182e2d65455737805bb6c31e7ee6f0b6e2fda5beeb98 | idris_python/cli.py | common_abstract_machine_python_loader | thautwarm/idris-python | 30 | python

@wise
def common_abstract_machine_python_loader(filename):
    """The .cam file loader."""
    return load_cam(filename, LinkSession())
0785b11e222aef42b93e471ef825337406f6f22d745e0ca95300ce42e67bafce | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | get_signal_name | anthonyricci123/python-telegram-bot-heroku | 2 | python

def get_signal_name(signum):
    """Returns the signal name of the given signal number."""
    return _signames[signum]
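The ``_signames`` table itself is not shown in this dump; below is a hedged reconstruction of that module-level lookup plus a usage line:

import signal

# Assumed shape of the module-level table (not shown in this dump):
_signames = {v: k
             for k, v in sorted(vars(signal).items(), reverse=True)
             if k.startswith('SIG') and not k.startswith('SIG_')}

print(get_signal_name(signal.SIGINT))   # -> 'SIGINT'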
acaf122e5e0a8270bce09421659eca3e7a02fe4dafc19ea435d314e78ce524a5 | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | escape_markdown | anthonyricci123/python-telegram-bot-heroku | 2 | python

def escape_markdown(text, version=1, entity_type=None):
    """
    Helper function to escape telegram markup symbols.

    Args:
        text (:obj:`str`): The text.
        version (:obj:`int` | :obj:`str`): Use to specify the version of Telegram's Markdown.
            Either ``1`` or ``2``. Defaults to ``1``.
        entity_type (:obj:`str`, optional): For the entity types ``PRE``, ``CODE`` and the link
            part of ``TEXT_LINKS``, only certain characters need to be escaped in ``MarkdownV2``.
            See the official API documentation for details. Only valid in combination with
            ``version=2``, will be ignored else.
    """
    if int(version) == 1:
        escape_chars = r'\*_`\['
    elif int(version) == 2:
        if entity_type in ('pre', 'code'):
            escape_chars = r'`\\'
        elif entity_type == 'text_link':
            escape_chars = r')\\'
        else:
            escape_chars = r'_*\[\]()~`>\#\+\-=|{}\.!'
    else:
        raise ValueError('Markdown version must be either 1 or 2!')
    return re.sub('([%s])' % escape_chars, r'\\\1', text)
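A quick illustration of two MarkdownV2 paths; outputs follow directly from the code above:

print(escape_markdown('a_b*c.d', version=2))
# -> a\_b\*c\.d
print(escape_markdown('code `x`', version=2, entity_type='code'))
# -> code \`x\`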
8e2278491923f04cb09c650efeff9fb6907fbac63afd7ea8f8564abe263e1fff | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | _datetime_to_float_timestamp | anthonyricci123/python-telegram-bot-heroku | 2 | python

def _datetime_to_float_timestamp(dt_obj):
    """Converts a datetime object to a float timestamp (with sub-second precision).
    If the datetime object is timezone-naive, it is assumed to be in UTC."""
    if dt_obj.tzinfo is None:
        dt_obj = dt_obj.replace(tzinfo=dtm.timezone.utc)
    return dt_obj.timestamp()
5c8a91f76ec70876d7b10b775570a974091335d296343843f1bae667a23a2197 | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | to_float_timestamp | anthonyricci123/python-telegram-bot-heroku | 2 | python

def to_float_timestamp(t, reference_timestamp=None):
    """
    Converts a given time object to a float POSIX timestamp.
    Used to convert different time specifications to a common format. The time object
    can be relative (i.e. indicate a time increment, or a time of day) or absolute.
    Any objects from the :class:`datetime` module that are timezone-naive will be assumed
    to be in UTC.

    ``None`` s are left alone (i.e. ``to_float_timestamp(None)`` is ``None``).

    Args:
        t (int | float | datetime.timedelta | datetime.datetime | datetime.time):
            Time value to convert. The semantics of this parameter will depend on its type:

            * :obj:`int` or :obj:`float` will be interpreted as "seconds from ``reference_t``"
            * :obj:`datetime.timedelta` will be interpreted as
              "time increment from ``reference_t``"
            * :obj:`datetime.datetime` will be interpreted as an absolute date/time value
            * :obj:`datetime.time` will be interpreted as a specific time of day

        reference_timestamp (float, optional): POSIX timestamp that indicates the absolute time
            from which relative calculations are to be performed (e.g. when ``t`` is given as an
            :obj:`int`, indicating "seconds from ``reference_t``"). Defaults to now (the time at
            which this function is called).

            If ``t`` is given as an absolute representation of date & time (i.e. a
            ``datetime.datetime`` object), ``reference_timestamp`` is not relevant and so its
            value should be ``None``. If this is not the case, a ``ValueError`` will be raised.

    Returns:
        (float | None) The return value depends on the type of argument ``t``. If ``t`` is
        given as a time increment (i.e. as an :obj:`int`, :obj:`float` or
        :obj:`datetime.timedelta`), then the return value will be ``reference_t`` + ``t``.

        Else if it is given as an absolute date/time value (i.e. a :obj:`datetime.datetime`
        object), the equivalent value as a POSIX timestamp will be returned.

        Finally, if it is a time of the day without date (i.e. a :obj:`datetime.time`
        object), the return value is the nearest future occurrence of that time of day.

    Raises:
        TypeError: if ``t``'s type is not one of those described above
    """
    if reference_timestamp is None:
        reference_timestamp = time.time()
    elif isinstance(t, dtm.datetime):
        raise ValueError('t is an (absolute) datetime while reference_timestamp is not None')

    if isinstance(t, dtm.timedelta):
        return reference_timestamp + t.total_seconds()
    elif isinstance(t, Number):
        return reference_timestamp + t
    elif isinstance(t, dtm.time):
        if t.tzinfo is not None:
            reference_dt = dtm.datetime.fromtimestamp(reference_timestamp, tz=t.tzinfo)
        else:
            reference_dt = dtm.datetime.utcfromtimestamp(reference_timestamp)
        reference_date = reference_dt.date()
        reference_time = reference_dt.timetz()
        if reference_time > t:
            reference_date += dtm.timedelta(days=1)
        return _datetime_to_float_timestamp(dtm.datetime.combine(reference_date, t))
    elif isinstance(t, dtm.datetime):
        return _datetime_to_float_timestamp(t)

    raise TypeError('Unable to convert {} object to timestamp'.format(type(t).__name__))
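Two worked calls illustrating the relative and absolute paths (both derivable from the code above):

import datetime as dtm

# Relative: 30 seconds past a fixed reference timestamp.
print(to_float_timestamp(30, reference_timestamp=1000.0))   # -> 1030.0
# Absolute: a timezone-naive datetime is treated as UTC.
dt = dtm.datetime(2020, 1, 1)
print(to_float_timestamp(dt))                               # -> 1577836800.0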
b56288df6f21a02f3136ee6d773e9d3a011d2e1d7b2c2886122367681b53a427 | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | to_timestamp | anthonyricci123/python-telegram-bot-heroku | 2 | python

def to_timestamp(dt_obj, reference_timestamp=None):
    """
    Wrapper over :func:`to_float_timestamp` which returns an integer (the float value truncated
    down to the nearest integer).

    See the documentation for :func:`to_float_timestamp` for more details.
    """
    return int(to_float_timestamp(dt_obj, reference_timestamp)) if dt_obj is not None else None
f50f8112ffce0b36eb7efbc0a40f6718ef2261e162fc5b01c93a805b5da35f32 | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | from_timestamp | anthonyricci123/python-telegram-bot-heroku | 2 | python

def from_timestamp(unixtime, tzinfo=dtm.timezone.utc):
    """
    Converts an (integer) unix timestamp to a timezone aware datetime object.
    ``None`` s are left alone (i.e. ``from_timestamp(None)`` is ``None``).

    Args:
        unixtime (int): integer POSIX timestamp
        tzinfo (:obj:`datetime.tzinfo`, optional): The timezone, the timestamp is to be converted
            to. Defaults to UTC.

    Returns:
        timezone aware equivalent :obj:`datetime.datetime` value if ``timestamp`` is not
        ``None``; else ``None``
    """
    if unixtime is None:
        return None

    if tzinfo is not None:
        return dtm.datetime.fromtimestamp(unixtime, tz=tzinfo)
    else:
        return dtm.datetime.utcfromtimestamp(unixtime)
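A round-trip through the two helpers above:

import datetime as dtm

ts = to_timestamp(dtm.datetime(2020, 1, 1, tzinfo=dtm.timezone.utc))
print(ts)                  # -> 1577836800
print(from_timestamp(ts))  # -> 2020-01-01 00:00:00+00:00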
477af4322cbccb0e0f94f3fbeca4ba3a8774653ff04bf1e9ac9af784144f0e56 | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | mention_html | anthonyricci123/python-telegram-bot-heroku | 2 | python

def mention_html(user_id, name):
    """
    Args:
        user_id (:obj:`int`) The user's id which you want to mention.
        name (:obj:`str`) The name the mention is showing.

    Returns:
        :obj:`str`: The inline mention for the user as html.
    """
    if isinstance(user_id, int):
        return u'<a href="tg://user?id={}">{}</a>'.format(user_id, escape(name))
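A one-line illustration; note the HTML-escaping of the display name:

print(mention_html(123456, 'Ada & Grace'))
# -> <a href="tg://user?id=123456">Ada &amp; Grace</a>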
83373e0138c841106b59834d113c020795a958d278a80b0f06a56bb4d547441f | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | mention_markdown | anthonyricci123/python-telegram-bot-heroku | 2 | python

def mention_markdown(user_id, name, version=1):
    """
    Args:
        user_id (:obj:`int`) The user's id which you want to mention.
        name (:obj:`str`) The name the mention is showing.
        version (:obj:`int` | :obj:`str`): Use to specify the version of Telegram's Markdown.
            Either ``1`` or ``2``. Defaults to ``1``

    Returns:
        :obj:`str`: The inline mention for the user as markdown.
    """
    if isinstance(user_id, int):
        return u'[{}](tg://user?id={})'.format(escape_markdown(name, version=version), user_id)
88e3f31fd9b7af812a0f9743516aa6a6e5422fb690c9975ccb06b12c4c175ead | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | effective_message_type | anthonyricci123/python-telegram-bot-heroku | 2 | python

def effective_message_type(entity):
    """
    Extracts the type of message as a string identifier from a :class:`telegram.Message` or a
    :class:`telegram.Update`.

    Args:
        entity (:obj:`Update` | :obj:`Message`) The ``update`` or ``message`` to extract from

    Returns:
        str: One of ``Message.MESSAGE_TYPES``
    """
    from telegram import Message
    from telegram import Update

    if isinstance(entity, Message):
        message = entity
    elif isinstance(entity, Update):
        message = entity.effective_message
    else:
        raise TypeError('entity is not Message or Update (got: {})'.format(type(entity)))

    for i in Message.MESSAGE_TYPES:
        if getattr(message, i, None):
            return i

    return None
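A hedged usage sketch inside a python-telegram-bot handler callback; the handler wiring itself is assumed:

def media_callback(update, context):
    # Dispatch on the concrete payload type of the incoming message.
    kind = effective_message_type(update)
    if kind == 'photo':
        update.effective_message.reply_text('Thanks for the photo!')
    elif kind == 'voice':
        update.effective_message.reply_text('Got your voice note.')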
c4cf00ce30313ccfdbc052de31f246760e1acb41363bdaf29c45dfccf156ba86 | def create_deep_linked_url(bot_username, payload=None, group=False):
'\n Creates a deep-linked URL for this ``bot_username`` with the specified ``payload``.\n See https://core.telegram.org/bots#deep-linking to learn more.\n\n The ``payload`` may consist of the following characters: ``A-Z, a-z, 0-9, _, -``\n\n Note:\n Works well in conjunction with\n ``CommandHandler("start", callback, filters = Filters.regex(\'payload\'))``\n\n Examples:\n ``create_deep_linked_url(bot.get_me().username, "some-params")``\n\n Args:\n bot_username (:obj:`str`): The username to link to\n payload (:obj:`str`, optional): Parameters to encode in the created URL\n group (:obj:`bool`, optional): If `True` the user is prompted to select a group to add the\n bot to. If `False`, opens a one-on-one conversation with the bot. Defaults to `False`.\n\n Returns:\n :obj:`str`: An URL to start the bot with specific parameters\n '
if ((bot_username is None) or (len(bot_username) <= 3)):
raise ValueError('You must provide a valid bot_username.')
base_url = 'https://t.me/{}'.format(bot_username)
if (not payload):
return base_url
if (len(payload) > 64):
raise ValueError('The deep-linking payload must not exceed 64 characters.')
if (not re.match('^[A-Za-z0-9_-]+$', payload)):
raise ValueError('Only the following characters are allowed for deep-linked URLs: A-Z, a-z, 0-9, _ and -')
if group:
key = 'startgroup'
else:
key = 'start'
return '{0}?{1}={2}'.format(base_url, key, payload) | Creates a deep-linked URL for this ``bot_username`` with the specified ``payload``.
See https://core.telegram.org/bots#deep-linking to learn more.
The ``payload`` may consist of the following characters: ``A-Z, a-z, 0-9, _, -``
Note:
Works well in conjunction with
``CommandHandler("start", callback, filters = Filters.regex('payload'))``
Examples:
``create_deep_linked_url(bot.get_me().username, "some-params")``
Args:
bot_username (:obj:`str`): The username to link to
payload (:obj:`str`, optional): Parameters to encode in the created URL
group (:obj:`bool`, optional): If `True` the user is prompted to select a group to add the
bot to. If `False`, opens a one-on-one conversation with the bot. Defaults to `False`.
Returns:
:obj:`str`: An URL to start the bot with specific parameters | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | create_deep_linked_url | anthonyricci123/python-telegram-bot-heroku | 2 | python | def create_deep_linked_url(bot_username, payload=None, group=False):
'\n Creates a deep-linked URL for this ``bot_username`` with the specified ``payload``.\n See https://core.telegram.org/bots#deep-linking to learn more.\n\n The ``payload`` may consist of the following characters: ``A-Z, a-z, 0-9, _, -``\n\n Note:\n Works well in conjunction with\n ``CommandHandler("start", callback, filters = Filters.regex(\'payload\'))``\n\n Examples:\n ``create_deep_linked_url(bot.get_me().username, "some-params")``\n\n Args:\n bot_username (:obj:`str`): The username to link to\n payload (:obj:`str`, optional): Parameters to encode in the created URL\n group (:obj:`bool`, optional): If `True` the user is prompted to select a group to add the\n bot to. If `False`, opens a one-on-one conversation with the bot. Defaults to `False`.\n\n Returns:\n :obj:`str`: An URL to start the bot with specific parameters\n '
if ((bot_username is None) or (len(bot_username) <= 3)):
raise ValueError('You must provide a valid bot_username.')
base_url = 'https://t.me/{}'.format(bot_username)
if (not payload):
return base_url
if (len(payload) > 64):
raise ValueError('The deep-linking payload must not exceed 64 characters.')
if (not re.match('^[A-Za-z0-9_-]+$', payload)):
raise ValueError('Only the following characters are allowed for deep-linked URLs: A-Z, a-z, 0-9, _ and -')
if group:
key = 'startgroup'
else:
key = 'start'
return '{0}?{1}={2}'.format(base_url, key, payload) | def create_deep_linked_url(bot_username, payload=None, group=False):
'\n Creates a deep-linked URL for this ``bot_username`` with the specified ``payload``.\n See https://core.telegram.org/bots#deep-linking to learn more.\n\n The ``payload`` may consist of the following characters: ``A-Z, a-z, 0-9, _, -``\n\n Note:\n Works well in conjunction with\n ``CommandHandler("start", callback, filters = Filters.regex(\'payload\'))``\n\n Examples:\n ``create_deep_linked_url(bot.get_me().username, "some-params")``\n\n Args:\n bot_username (:obj:`str`): The username to link to\n payload (:obj:`str`, optional): Parameters to encode in the created URL\n group (:obj:`bool`, optional): If `True` the user is prompted to select a group to add the\n bot to. If `False`, opens a one-on-one conversation with the bot. Defaults to `False`.\n\n Returns:\n :obj:`str`: An URL to start the bot with specific parameters\n '
if ((bot_username is None) or (len(bot_username) <= 3)):
raise ValueError('You must provide a valid bot_username.')
base_url = 'https://t.me/{}'.format(bot_username)
if (not payload):
return base_url
if (len(payload) > 64):
raise ValueError('The deep-linking payload must not exceed 64 characters.')
if (not re.match('^[A-Za-z0-9_-]+$', payload)):
raise ValueError('Only the following characters are allowed for deep-linked URLs: A-Z, a-z, 0-9, _ and -')
if group:
key = 'startgroup'
else:
key = 'start'
return '{0}?{1}={2}'.format(base_url, key, payload)<|docstring|>Creates a deep-linked URL for this ``bot_username`` with the specified ``payload``.
See https://core.telegram.org/bots#deep-linking to learn more.
The ``payload`` may consist of the following characters: ``A-Z, a-z, 0-9, _, -``
Note:
Works well in conjunction with
``CommandHandler("start", callback, filters = Filters.regex('payload'))``
Examples:
``create_deep_linked_url(bot.get_me().username, "some-params")``
Args:
bot_username (:obj:`str`): The username to link to
payload (:obj:`str`, optional): Parameters to encode in the created URL
group (:obj:`bool`, optional): If `True` the user is prompted to select a group to add the
bot to. If `False`, opens a one-on-one conversation with the bot. Defaults to `False`.
Returns:
:obj:`str`: An URL to start the bot with specific parameters<|endoftext|> |
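A quick usage sketch for the record above (bot username and payload are illustrative; per the validation code, payloads may only contain A-Z, a-z, 0-9, _ and -):

    url = create_deep_linked_url('ExampleBot', 'coupon-abc')
    # -> 'https://t.me/ExampleBot?start=coupon-abc'
    group_url = create_deep_linked_url('ExampleBot', 'coupon-abc', group=True)
    # -> 'https://t.me/ExampleBot?startgroup=coupon-abc'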
9c382ebfbc45ea6183f989659a2eb63918fe2e322dbb3b68dc0346e4e4c783ef | def encode_conversations_to_json(conversations):
'Helper method to encode a conversations dict (that uses tuples as keys) to a\n    JSON-serializable way. Use :attr:`_decode_conversations_from_json` to decode.\n\n    Args:\n        conversations (:obj:`dict`): The conversations dict to transform to JSON.\n\n    Returns:\n        :obj:`str`: The JSON-serialized conversations dict\n    '
tmp = {}
for (handler, states) in conversations.items():
tmp[handler] = {}
for (key, state) in states.items():
tmp[handler][json.dumps(key)] = state
return json.dumps(tmp) | Helper method to encode a conversations dict (that uses tuples as keys) to a
JSON-serializable way. Use :attr:`_decode_conversations_from_json` to decode.
Args:
conversations (:obj:`dict`): The conversations dict to transform to JSON.
Returns:
:obj:`str`: The JSON-serialized conversations dict | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | encode_conversations_to_json | anthonyricci123/python-telegram-bot-heroku | 2 | python | def encode_conversations_to_json(conversations):
'Helper method to encode a conversations dict (that uses tuples as keys) to a\n    JSON-serializable way. Use :attr:`_decode_conversations_from_json` to decode.\n\n    Args:\n        conversations (:obj:`dict`): The conversations dict to transform to JSON.\n\n    Returns:\n        :obj:`str`: The JSON-serialized conversations dict\n    '
tmp = {}
for (handler, states) in conversations.items():
tmp[handler] = {}
for (key, state) in states.items():
tmp[handler][json.dumps(key)] = state
return json.dumps(tmp) | def encode_conversations_to_json(conversations):
'Helper method to encode a conversations dict (that uses tuples as keys) to a\n    JSON-serializable way. Use :attr:`_decode_conversations_from_json` to decode.\n\n    Args:\n        conversations (:obj:`dict`): The conversations dict to transform to JSON.\n\n    Returns:\n        :obj:`str`: The JSON-serialized conversations dict\n    '
tmp = {}
for (handler, states) in conversations.items():
tmp[handler] = {}
for (key, state) in states.items():
tmp[handler][json.dumps(key)] = state
return json.dumps(tmp)<|docstring|>Helper method to encode a conversations dict (that uses tuples as keys) to a
JSON-serializable way. Use :attr:`_decode_conversations_from_json` to decode.
Args:
conversations (:obj:`dict`): The conversations dict to transform to JSON.
Returns:
:obj:`str`: The JSON-serialized conversations dict<|endoftext|> |
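A minimal sketch of what the encoder above produces (handler name is illustrative); the tuple keys become JSON-array strings nested inside the payload:

    conversations = {'conv_handler': {(123, 456): 1}}
    print(encode_conversations_to_json(conversations))
    # -> {"conv_handler": {"[123, 456]": 1}}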
f159739109dbdc9806c06b84995cb33975ec8f8c302ab9de6b168102e727cf3c | def decode_conversations_from_json(json_string):
'Helper method to decode a conversations dict (that uses tuples as keys) from a\n JSON-string created with :attr:`_encode_conversations_to_json`.\n\n Args:\n json_string (:obj:`str`): The conversations dict as JSON string.\n\n Returns:\n :obj:`dict`: The conversations dict after decoding\n '
tmp = json.loads(json_string)
conversations = {}
for (handler, states) in tmp.items():
conversations[handler] = {}
for (key, state) in states.items():
conversations[handler][tuple(json.loads(key))] = state
return conversations | Helper method to decode a conversations dict (that uses tuples as keys) from a
JSON-string created with :attr:`_encode_conversations_to_json`.
Args:
json_string (:obj:`str`): The conversations dict as JSON string.
Returns:
:obj:`dict`: The conversations dict after decoding | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | decode_conversations_from_json | anthonyricci123/python-telegram-bot-heroku | 2 | python | def decode_conversations_from_json(json_string):
'Helper method to decode a conversations dict (that uses tuples as keys) from a\n JSON-string created with :attr:`_encode_conversations_to_json`.\n\n Args:\n json_string (:obj:`str`): The conversations dict as JSON string.\n\n Returns:\n :obj:`dict`: The conversations dict after decoding\n '
tmp = json.loads(json_string)
conversations = {}
for (handler, states) in tmp.items():
conversations[handler] = {}
for (key, state) in states.items():
conversations[handler][tuple(json.loads(key))] = state
return conversations | def decode_conversations_from_json(json_string):
'Helper method to decode a conversations dict (that uses tuples as keys) from a\n JSON-string created with :attr:`_encode_conversations_to_json`.\n\n Args:\n json_string (:obj:`str`): The conversations dict as JSON string.\n\n Returns:\n :obj:`dict`: The conversations dict after decoding\n '
tmp = json.loads(json_string)
conversations = {}
for (handler, states) in tmp.items():
conversations[handler] = {}
for (key, state) in states.items():
conversations[handler][tuple(json.loads(key))] = state
return conversations<|docstring|>Helper method to decode a conversations dict (that uses tuples as keys) from a
JSON-string created with :attr:`_encode_conversations_to_json`.
Args:
json_string (:obj:`str`): The conversations dict as JSON string.
Returns:
:obj:`dict`: The conversations dict after decoding<|endoftext|> |
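Round-trip sketch, assuming the encoder from the previous record; the tuple keys survive the trip:

    encoded = encode_conversations_to_json({'conv_handler': {(123, 456): 1}})
    assert decode_conversations_from_json(encoded) == {'conv_handler': {(123, 456): 1}}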
d190e5430e1b143fbf8fe79b642c58024dff15abf218cc403a82e27a3b987b11 | def decode_user_chat_data_from_json(data):
'Helper method to decode chat or user data (that uses ints as keys) from a\n JSON-string.\n\n Args:\n data (:obj:`str`): The user/chat_data dict as JSON string.\n\n Returns:\n :obj:`dict`: The user/chat_data defaultdict after decoding\n '
tmp = defaultdict(dict)
decoded_data = json.loads(data)
for (user, data) in decoded_data.items():
user = int(user)
tmp[user] = {}
for (key, value) in data.items():
try:
key = int(key)
except ValueError:
pass
tmp[user][key] = value
return tmp | Helper method to decode chat or user data (that uses ints as keys) from a
JSON-string.
Args:
data (:obj:`str`): The user/chat_data dict as JSON string.
Returns:
:obj:`dict`: The user/chat_data defaultdict after decoding | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | decode_user_chat_data_from_json | anthonyricci123/python-telegram-bot-heroku | 2 | python | def decode_user_chat_data_from_json(data):
'Helper method to decode chat or user data (that uses ints as keys) from a\n JSON-string.\n\n Args:\n data (:obj:`str`): The user/chat_data dict as JSON string.\n\n Returns:\n :obj:`dict`: The user/chat_data defaultdict after decoding\n '
tmp = defaultdict(dict)
decoded_data = json.loads(data)
for (user, data) in decoded_data.items():
user = int(user)
tmp[user] = {}
for (key, value) in data.items():
try:
key = int(key)
except ValueError:
pass
tmp[user][key] = value
return tmp | def decode_user_chat_data_from_json(data):
'Helper method to decode chat or user data (that uses ints as keys) from a\n JSON-string.\n\n Args:\n data (:obj:`str`): The user/chat_data dict as JSON string.\n\n Returns:\n :obj:`dict`: The user/chat_data defaultdict after decoding\n '
tmp = defaultdict(dict)
decoded_data = json.loads(data)
for (user, data) in decoded_data.items():
user = int(user)
tmp[user] = {}
for (key, value) in data.items():
try:
key = int(key)
except ValueError:
pass
tmp[user][key] = value
return tmp<|docstring|>Helper method to decode chat or user data (that uses ints as keys) from a
JSON-string.
Args:
data (:obj:`str`): The user/chat_data dict as JSON string.
Returns:
:obj:`dict`: The user/chat_data defaultdict after decoding<|endoftext|> |
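Behaviour sketch for the decoder above: outer keys always become ints, inner keys become ints only when they parse as one:

    restored = decode_user_chat_data_from_json('{"12345": {"7": "a", "name": "b"}}')
    assert restored[12345] == {7: 'a', 'name': 'b'}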
03c5956be5bd9de73cb73f511c4ec4c6dab58f2553de0d25dabd279a1f0e1648 | def cuwb_data_csv():
'\n 1 minute of CUWB data\n 6 People\n 3 w/o acceleration data\n 5 Trays\n :return:\n '
return (os.path.dirname(os.path.realpath(__file__)) + '/fixtures/uwb.csv') | 1 minute of CUWB data
6 People
3 w/o acceleration data
5 Trays
:return: | tests/conftest.py | cuwb_data_csv | WildflowerSchools/wf-process-cuwb-data | 0 | python | def cuwb_data_csv():
'\n 1 minute of CUWB data\n 6 People\n 3 w/o acceleration data\n 5 Trays\n :return:\n '
return (os.path.dirname(os.path.realpath(__file__)) + '/fixtures/uwb.csv') | def cuwb_data_csv():
'\n 1 minute of CUWB data\n 6 People\n 3 w/o acceleration data\n 5 Trays\n :return:\n '
return (os.path.dirname(os.path.realpath(__file__)) + '/fixtures/uwb.csv')<|docstring|>1 minute of CUWB data
6 People
3 w/o acceleration data
5 Trays
:return:<|endoftext|> |
03721504319f55ce958359a65eabc9bf461eb1798a315bac21f77e1a2227aaee | def plot(self, figure=None, plot_class=None, domain=((- 5), 5), **opts):
'Uses McUtils to plot the wavefunction on the passed figure (makes a new one if none)\n\n :param figure:\n :type figure: Graphics | Graphics3D\n :return:\n :rtype:\n '
discrete = np.linspace(*domain, 100)
data = self.evaluate(discrete, **opts)
if (plot_class is None):
plot_class = Plot
return plot_class(discrete, data, figure=figure, **opts) | Uses McUtils to plot the wavefunction on the passed figure (makes a new one if none)
:param figure:
:type figure: Graphics | Graphics3D
:return:
:rtype: | Psience/BasisReps/Wavefunctions.py | plot | McCoyGroup/Coordinerds | 0 | python | def plot(self, figure=None, plot_class=None, domain=((- 5), 5), **opts):
'Uses McUtils to plot the wavefunction on the passed figure (makes a new one if none)\n\n :param figure:\n :type figure: Graphics | Graphics3D\n :return:\n :rtype:\n '
discrete = np.linspace(*domain, 100)
data = self.evaluate(discrete, **opts)
if (plot_class is None):
plot_class = Plot
return plot_class(discrete, data, figure=figure, **opts) | def plot(self, figure=None, plot_class=None, domain=((- 5), 5), **opts):
'Uses McUtils to plot the wavefunction on the passed figure (makes a new one if none)\n\n :param figure:\n :type figure: Graphics | Graphics3D\n :return:\n :rtype:\n '
discrete = np.linspace(*domain, 100)
data = self.evaluate(discrete, **opts)
if (plot_class is None):
plot_class = Plot
return plot_class(discrete, data, figure=figure, **opts)<|docstring|>Uses McUtils to plot the wavefunction on the passed figure (makes a new one if none)
:param figure:
:type figure: Graphics | Graphics3D
:return:
:rtype:<|endoftext|> |
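Hypothetical usage of the plot method above (`wfn`, `wfn2` and `fig` are stand-ins for wavefunction and figure objects):

    fig = wfn.plot(domain=(-3, 3))  # evaluates on a 100-point grid and draws a new Plot
    wfn2.plot(figure=fig)           # overlays a second curve on the same figure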
ed48204ca6cbc97217048ed2ebd9206f9df4f9623046f2b4608d733102b5fd27 | def expect(self, operator):
'\n Provides expectation values of operators, but the operators have to be Operator objects...\n basically all the logic is inside the operator, but this is worth it for use in ExpansionWavefunction\n We can also potentially add support for ExpansionOperators or SymbolicOperators in the future that are\n able to very cleanly reuse stuff like the `p` matrix that a RepresentationBasis defines\n\n :param operator: the operator to take the expectation of\n :type operator: Operator\n '
return operator[(self.index, self.index)] | Provides expectation values of operators, but the operators have to be Operator objects...
basically all the logic is inside the operator, but this is worth it for use in ExpansionWavefunction
We can also potentially add support for ExpansionOperators or SymbolicOperators in the future that are
able to very cleanly reuse stuff like the `p` matrix that a RepresentationBasis defines
:param operator: the operator to take the expectation of
:type operator: Operator | Psience/BasisReps/Wavefunctions.py | expect | McCoyGroup/Coordinerds | 0 | python | def expect(self, operator):
'\n Provides expectation values of operators, but the operators have to be Operator objects...\n basically all the logic is inside the operator, but this is worth it for use in ExpansionWavefunction\n We can also potentially add support for ExpansionOperators or SymbolicOperators in the future that are\n able to very cleanly reuse stuff like the `p` matrix that a RepresentationBasis defines\n\n :param operator: the operator to take the expectation of\n :type operator: Operator\n '
return operator[(self.index, self.index)] | def expect(self, operator):
'\n Provides expectation values of operators, but the operators have to be Operator objects...\n basically all the logic is inside the operator, but this is worth it for use in ExpansionWavefunction\n We can also potentially add support for ExpansionOperators or SymbolicOperators in the future that are\n able to very cleanly reuse stuff like the `p` matrix that a RepresentationBasis defines\n\n :param operator: the operator to take the expectation of\n :type operator: Operator\n '
return operator[(self.index, self.index)]<|docstring|>Provides expectation values of operators, but the operators have to be Operator objects...
basically all the logic is inside the operator, but this is worth it for use in ExpansionWavefunction
We can also potentially add support for ExpansionOperators or SymbolicOperators in the future that are
able to very cleanly reuse stuff like the `p` matrix that a RepresentationBasis defines
:param operator: the operator to take the expectation of
:type operator: Operator<|endoftext|> |
d1eca41dcec149db90a722d0dc1116bba5e574b27c148d063eae97ebc2eba230 | def expectation(self, op, other):
'Computes the expectation value of operator op over the wavefunction other and self\n\n :param other: the other wavefunction\n :type other: AnalyticWavefunction\n :param op: the operator to take the matrix element of\n :type op: Operator\n :return:\n :rtype:\n '
o = Representation(op, ...)
return o[(self.index, other.index)] | Computes the expectation value of operator op over the wavefunction other and self
:param other: the other wavefunction
:type other: AnalyticWavefunction
:param op: the operator to take the matrix element of
:type op: Operator
:return:
:rtype: | Psience/BasisReps/Wavefunctions.py | expectation | McCoyGroup/Coordinerds | 0 | python | def expectation(self, op, other):
'Computes the expectation value of operator op over the wavefunction other and self\n\n :param other: the other wavefunction\n :type other: AnalyticWavefunction\n :param op: the operator to take the matrix element of\n :type op: Operator\n :return:\n :rtype:\n '
o = Representation(op, ...)
return o[(self.index, other.index)] | def expectation(self, op, other):
'Computes the expectation value of operator op over the wavefunction other and self\n\n :param other: the other wavefunction\n :type other: AnalyticWavefunction\n :param op: the operator to take the matrix element of\n :type op: Operator\n :return:\n :rtype:\n '
o = Representation(op, ...)
return o[(self.index, other.index)]<|docstring|>Computes the expectation value of operator op over the wavefunction other and self
:param other: the other wavefunction
:type other: AnalyticWavefunction
:param op: the operator to take the matrix element of
:type op: Operator
:return:
:rtype:<|endoftext|> |
6f7f2fc043c0830d915928cc2387a16700488473895947219fd58d895c915345 | def probability_density(self):
'Computes the probability density of the current wavefunction\n\n :return:\n :rtype:\n '
return self.data | Computes the probability density of the current wavefunction
:return:
:rtype: | Psience/BasisReps/Wavefunctions.py | probability_density | McCoyGroup/Coordinerds | 0 | python | def probability_density(self):
'Computes the probability density of the current wavefunction\n\n :return:\n :rtype:\n '
return self.data | def probability_density(self):
'Computes the probability density of the current wavefunction\n\n :return:\n :rtype:\n '
return self.data<|docstring|>Computes the probability density of the current wavefunction
:return:
:rtype:<|endoftext|> |
bfdffa351d65452865a239736266e3ad1a14477c674538ef4d677d7f84261e88 | def __init__(self, energy, coefficients, basis_wfns):
'\n :param energy: energy of the wavefunction\n :type energy: float\n :param coefficients: expansion coefficients\n :type coefficients: Iterable[float]\n :param basis_wfns: basis functions for the expansion\n :type basis_wfns: Wavefunctions\n '
super().__init__(energy, {'coeffs': coefficients, 'basis': basis_wfns}) | :param energy: energy of the wavefunction
:type energy: float
:param coefficients: expansion coefficients
:type coefficients: Iterable[float]
:param basis_wfns: basis functions for the expansion
:type basis_wfns: Wavefunctions | Psience/BasisReps/Wavefunctions.py | __init__ | McCoyGroup/Coordinerds | 0 | python | def __init__(self, energy, coefficients, basis_wfns):
'\n :param energy: energy of the wavefunction\n :type energy: float\n :param coefficients: expansion coefficients\n :type coefficients: Iterable[float]\n :param basis_wfns: basis functions for the expansion\n :type basis_wfns: Wavefunctions\n '
super().__init__(energy, {'coeffs': coefficients, 'basis': basis_wfns}) | def __init__(self, energy, coefficients, basis_wfns):
'\n :param energy: energy of the wavefunction\n :type energy: float\n :param coefficients: expansion coefficients\n :type coefficients: Iterable[float]\n :param basis_wfns: basis functions for the expansion\n :type basis_wfns: Wavefunctions\n '
super().__init__(energy, {'coeffs': coefficients, 'basis': basis_wfns})<|docstring|>:param energy: energy of the wavefunction
:type energy: float
:param coefficients: expansion coefficients
:type coefficients: Iterable[float]
:param basis_wfns: basis functions for the expansion
:type basis_wfns: Wavefunctions<|endoftext|> |
d962f7c99155eb6f8a184248af9e5d124cf8e43cc0f99d447b5f512980eeeeb4 | def evaluate(self, *args, **kwargs):
'\n        Evaluates the wavefunction as any other linear expansion.\n\n        :param args: coordinates + any other args the basis takes\n        :type args:\n        :param kwargs: any keyword arguments the basis takes\n        :type kwargs:\n        :return: values of the wavefunction\n        :rtype:\n        '
return np.dot(self.data['coeffs'], np.array([f(args, **kwargs) for f in self.data['basis']])) | Evaluates the wavefunction as any other linear expansion.
:param args: coordinates + any other args the basis takes
:type args:
:param kwargs: any keyword arguments the basis takes
:type kwargs:
:return: values of the wavefunction
:rtype: | Psience/BasisReps/Wavefunctions.py | evaluate | McCoyGroup/Coordinerds | 0 | python | def evaluate(self, *args, **kwargs):
'\n        Evaluates the wavefunction as any other linear expansion.\n\n        :param args: coordinates + any other args the basis takes\n        :type args:\n        :param kwargs: any keyword arguments the basis takes\n        :type kwargs:\n        :return: values of the wavefunction\n        :rtype:\n        '
return np.dot(self.data['coeffs'], np.array([f(args, **kwargs) for f in self.data['basis']])) | def evaluate(self, *args, **kwargs):
'\n        Evaluates the wavefunction as any other linear expansion.\n\n        :param args: coordinates + any other args the basis takes\n        :type args:\n        :param kwargs: any keyword arguments the basis takes\n        :type kwargs:\n        :return: values of the wavefunction\n        :rtype:\n        '
return np.dot(self.data['coeffs'], np.array([f(args, **kwargs) for f in self.data['basis']]))<|docstring|>Evaluates the wavefunction as any other linear expansion.
:param args: coordinates + any other args the basis takes
:type args:
:param kwargs: any keyword arguments the basis takes
:type kwargs:
:return: values of the wavefunction
:rtype:<|endoftext|> |
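A standalone numpy sketch of the linear expansion psi(x) = sum_i c_i * phi_i(x) that `evaluate` implements, with two made-up basis callables:

    import numpy as np
    coeffs = np.array([0.6, 0.8])
    basis = [lambda x: np.ones_like(x), lambda x: x]
    x = np.array([0.0, 1.0, 2.0])
    psi = np.dot(coeffs, np.array([f(x) for f in basis]))  # -> [0.6, 1.4, 2.2]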
99ef6389622297480fd6d03b86b32dfc498605645bc898c4ffaf20a5dcbf0353 | def expect(self, operator):
'\n Provides the expectation value of the operator `op`.\n Uses the basis to compute the reps and then expands with the expansion coeffs.\n\n :param operator:\n :type operator:\n :return:\n :rtype:\n '
op_vector = operator[(tuple((x.index for x in self.data['basis'])), tuple((x.index for x in self.data['basis'])))]
return np.dot(self.data['coeffs'], op_vector) | Provides the expectation value of the operator `op`.
Uses the basis to compute the reps and then expands with the expansion coeffs.
:param operator:
:type operator:
:return:
:rtype: | Psience/BasisReps/Wavefunctions.py | expect | McCoyGroup/Coordinerds | 0 | python | def expect(self, operator):
'\n Provides the expectation value of the operator `op`.\n Uses the basis to compute the reps and then expands with the expansion coeffs.\n\n :param operator:\n :type operator:\n :return:\n :rtype:\n '
op_vector = operator[(tuple((x.index for x in self.data['basis'])), tuple((x.index for x in self.data['basis'])))]
return np.dot(self.data['coeffs'], op_vector) | def expect(self, operator):
'\n Provides the expectation value of the operator `op`.\n Uses the basis to compute the reps and then expands with the expansion coeffs.\n\n :param operator:\n :type operator:\n :return:\n :rtype:\n '
op_vector = operator[(tuple((x.index for x in self.data['basis'])), tuple((x.index for x in self.data['basis'])))]
return np.dot(self.data['coeffs'], op_vector)<|docstring|>Provides the expectation value of the operator `op`.
Uses the basis to compute the reps and then expands with the expansion coeffs.
:param operator:
:type operator:
:return:
:rtype:<|endoftext|> |
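For intuition, the quantity an expansion expectation value targets is <psi|O|psi> = c . (O c); a self-contained numpy sketch with a made-up 2x2 operator matrix in the basis indices:

    import numpy as np
    c = np.array([0.8, 0.6])
    O = np.array([[1.0, 0.1], [0.1, 2.0]])
    value = c @ O @ c  # 0.64*1.0 + 2*0.8*0.6*0.1 + 0.36*2.0 = 1.456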
d770bcea4c3e593041f3ddab45361fc9080bff3169b1f787dc458c1953565530 | def expectation(self, op, other):
'\n Computes the expectation value of operator `op` over the wavefunction `other` and `self`.\n **Note**: _the basis of `other`, `self`, and `op` are assumed to be the same_.\n\n :param op: an operator represented in the basis of the expansion\n :type op: Operator\n :param other: the other wavefunction to expand over\n :type other: ExpansionWavefunction\n :return:\n :rtype:\n '
op_matrix = op[(tuple((x.index for x in self.data['basis'])), tuple((o.index for o in other.basis)))]
return np.dot(self.data['coeffs'], np.dot(op_matrix, other.coeffs)) | Computes the expectation value of operator `op` over the wavefunction `other` and `self`.
**Note**: _the basis of `other`, `self`, and `op` are assumed to be the same_.
:param op: an operator represented in the basis of the expansion
:type op: Operator
:param other: the other wavefunction to expand over
:type other: ExpansionWavefunction
:return:
:rtype: | Psience/BasisReps/Wavefunctions.py | expectation | McCoyGroup/Coordinerds | 0 | python | def expectation(self, op, other):
'\n Computes the expectation value of operator `op` over the wavefunction `other` and `self`.\n **Note**: _the basis of `other`, `self`, and `op` are assumed to be the same_.\n\n :param op: an operator represented in the basis of the expansion\n :type op: Operator\n :param other: the other wavefunction to expand over\n :type other: ExpansionWavefunction\n :return:\n :rtype:\n '
op_matrix = op[(tuple((x.index for x in self.data['basis'])), tuple((o.index for o in other.basis)))]
return np.dot(self.data['coeffs'], np.dot(op_matrix, other.coeffs)) | def expectation(self, op, other):
'\n Computes the expectation value of operator `op` over the wavefunction `other` and `self`.\n **Note**: _the basis of `other`, `self`, and `op` are assumed to be the same_.\n\n :param op: an operator represented in the basis of the expansion\n :type op: Operator\n :param other: the other wavefunction to expand over\n :type other: ExpansionWavefunction\n :return:\n :rtype:\n '
op_matrix = op[(tuple((x.index for x in self.data['basis'])), tuple((o.index for o in other.basis)))]
return np.dot(self.data['coeffs'], np.dot(op_matrix, other.coeffs))<|docstring|>Computes the expectation value of operator `op` over the wavefunction `other` and `self`.
**Note**: _the basis of `other`, `self`, and `op` are assumed to be the same_.
:param op: an operator represented in the basis of the expansion
:type op: Operator
:param other: the other wavefunction to expand over
:type other: ExpansionWavefunction
:return:
:rtype:<|endoftext|> |
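The mixed matrix element generalizes this to <psi_a|O|psi_b> = c_a . (O c_b); same made-up operator:

    import numpy as np
    O = np.array([[1.0, 0.1], [0.1, 2.0]])
    c_a, c_b = np.array([0.8, 0.6]), np.array([0.0, 1.0])
    value = c_a @ O @ c_b  # 0.8*0.1 + 0.6*2.0 = 1.28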
b5f577f2890813ae309743c42904094d47ffee7104673a9373127abae24c4d8d | def probability_density(self):
'Computes the probability density of the current wavefunction\n\n :return:\n :rtype:\n '
raise NotImplementedError | Computes the probability density of the current wavefunction
:return:
:rtype: | Psience/BasisReps/Wavefunctions.py | probability_density | McCoyGroup/Coordinerds | 0 | python | def probability_density(self):
'Computes the probability density of the current wavefunction\n\n :return:\n :rtype:\n '
raise NotImplementedError | def probability_density(self):
'Computes the probability density of the current wavefunction\n\n :return:\n :rtype:\n '
raise NotImplementedError<|docstring|>Computes the probability density of the current wavefunction
:return:
:rtype:<|endoftext|> |
4049e00911980430fcc28d29b21fbf6acfd2456a74852112806a6f32327da501 | def __init__(self, energies, coefficients, basis_wfns, **ops):
'\n :param energies: energies for the stored wavefunctions\n :type energies: Iterable[float]\n :param coefficients: expansion coefficients\n :type coefficients: Iterable[Iterable[float]]\n :param basis_wfns: wavefunctions to use as the basis for the expansion\n :type basis_wfns: Wavefunctions\n :param ops: extra options for feeding through to `Wavefunctions`\n :type ops:\n '
self._basis = basis_wfns
if ('wavefunction_class' not in ops):
ops['wavefunction_class'] = ExpansionWavefunction
super().__init__(energies, coefficients, **ops) | :param energies: energies for the stored wavefunctions
:type energies: Iterable[float]
:param coefficients: expansion coefficients
:type coefficients: Iterable[Iterable[float]]
:param basis_wfns: wavefunctions to use as the basis for the expansion
:type basis_wfns: Wavefunctions
:param ops: extra options for feeding through to `Wavefunctions`
:type ops: | Psience/BasisReps/Wavefunctions.py | __init__ | McCoyGroup/Coordinerds | 0 | python | def __init__(self, energies, coefficients, basis_wfns, **ops):
'\n :param energies: energies for the stored wavefunctions\n :type energies: Iterable[float]\n :param coefficients: expansion coefficients\n :type coefficients: Iterable[Iterable[float]]\n :param basis_wfns: wavefunctions to use as the basis for the expansion\n :type basis_wfns: Wavefunctions\n :param ops: extra options for feeding through to `Wavefunctions`\n :type ops:\n '
self._basis = basis_wfns
if ('wavefunction_class' not in ops):
ops['wavefunction_class'] = ExpansionWavefunction
super().__init__(energies, coefficients, **ops) | def __init__(self, energies, coefficients, basis_wfns, **ops):
'\n :param energies: energies for the stored wavefunctions\n :type energies: Iterable[float]\n :param coefficients: expansion coefficients\n :type coefficients: Iterable[Iterable[float]]\n :param basis_wfns: wavefunctions to use as the basis for the expansion\n :type basis_wfns: Wavefunctions\n :param ops: extra options for feeding through to `Wavefunctions`\n :type ops:\n '
self._basis = basis_wfns
if ('wavefunction_class' not in ops):
ops['wavefunction_class'] = ExpansionWavefunction
super().__init__(energies, coefficients, **ops)<|docstring|>:param energies: energies for the stored wavefunctions
:type energies: Iterable[float]
:param coefficients: expansion coefficients
:type coefficients: Iterable[Iterable[float]]
:param basis_wfns: wavefunctions to use as the basis for the expansion
:type basis_wfns: Wavefunctions
:param ops: extra options for feeding through to `Wavefunctions`
:type ops:<|endoftext|> |
ef723efc10ddbccff7b70dee5e31971933e6eb7668e43efd3ebf3cd06b267f8c | @staticmethod
def Args(parser):
'Add arguments to the parser.\n\n Args:\n parser: argparse.ArgumentParser, This is a standard argparser parser with\n which you can register arguments. See the public argparse documentation\n for its capabilities.\n '
flags.AddZoneFlag(parser) | Add arguments to the parser.
Args:
parser: argparse.ArgumentParser, This is a standard argparser parser with
which you can register arguments. See the public argparse documentation
for its capabilities. | google-cloud-sdk/lib/surface/container/get_server_config.py | Args | KaranToor/MA450 | 1 | python | @staticmethod
def Args(parser):
'Add arguments to the parser.\n\n Args:\n parser: argparse.ArgumentParser, This is a standard argparser parser with\n which you can register arguments. See the public argparse documentation\n for its capabilities.\n '
flags.AddZoneFlag(parser) | @staticmethod
def Args(parser):
'Add arguments to the parser.\n\n Args:\n parser: argparse.ArgumentParser, This is a standard argparser parser with\n which you can register arguments. See the public argparse documentation\n for its capabilities.\n '
flags.AddZoneFlag(parser)<|docstring|>Add arguments to the parser.
Args:
parser: argparse.ArgumentParser, This is a standard argparser parser with
which you can register arguments. See the public argparse documentation
for its capabilities.<|endoftext|> |
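For context, the zone flag registered above is consumed on the command line roughly like this (zone value is illustrative):

    gcloud container get-server-config --zone us-central1-a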
aa2fb6d3e5f051e598083314d40b7a4ceb211bdf17660565bbde5f837d069f1c | def get_tf_tokenizer(module_handle):
'Creates a preprocessing function.'
tokenization_info = get_tokenization_info(module_handle)
table_initializer = tf.lookup.TextFileInitializer(filename=tokenization_info['vocab_file'], key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE, value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
vocab_lookup_table = tf.lookup.StaticVocabularyTable(initializer=table_initializer, num_oov_buckets=1, lookup_key_dtype=tf.string)
tokenizer = tf_text.BertTokenizer(vocab_lookup_table=vocab_lookup_table, lower_case=tokenization_info['do_lower_case'])
return (tokenizer, vocab_lookup_table) | Creates a preprocessing function. | language/orqa/utils/bert_utils.py | get_tf_tokenizer | alsuhr-c/language | 1199 | python | def get_tf_tokenizer(module_handle):
tokenization_info = get_tokenization_info(module_handle)
table_initializer = tf.lookup.TextFileInitializer(filename=tokenization_info['vocab_file'], key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE, value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
vocab_lookup_table = tf.lookup.StaticVocabularyTable(initializer=table_initializer, num_oov_buckets=1, lookup_key_dtype=tf.string)
tokenizer = tf_text.BertTokenizer(vocab_lookup_table=vocab_lookup_table, lower_case=tokenization_info['do_lower_case'])
return (tokenizer, vocab_lookup_table) | def get_tf_tokenizer(module_handle):
tokenization_info = get_tokenization_info(module_handle)
table_initializer = tf.lookup.TextFileInitializer(filename=tokenization_info['vocab_file'], key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE, value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
vocab_lookup_table = tf.lookup.StaticVocabularyTable(initializer=table_initializer, num_oov_buckets=1, lookup_key_dtype=tf.string)
tokenizer = tf_text.BertTokenizer(vocab_lookup_table=vocab_lookup_table, lower_case=tokenization_info['do_lower_case'])
return (tokenizer, vocab_lookup_table)<|docstring|>Creates a preprocessing function.<|endoftext|> |
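Hypothetical usage with a public BERT hub handle (handle and input text are illustrative, and the vocab table needs initialization under TF1-style sessions):

    tokenizer, vocab_table = get_tf_tokenizer(
        'https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1')
    wordpiece_ids = tokenizer.tokenize(tf.constant(['hello world']))  # RaggedTensor of ids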
e7721fe82e7884a778bb1e49d6cf741da1bff3b8d6e09e551685707a77a9c84c | def tokenize_with_original_mapping(text_input, tokenizer):
'Tokenize with original mapping.'
text_input = tf.regex_replace(text_input, '\\p{Cc}|\\p{Cf}', ' ')
orig_tokens = tf_text.regex_split(text_input, bert_tokenizer._DELIM_REGEX_PATTERN, tokenizer._basic_tokenizer._keep_delim_regex_pattern, 'BertBasicTokenizer')
normalized_tokens = orig_tokens
normalized_text = text_input
if tokenizer._basic_tokenizer._lower_case:
def _do_lower_case(t):
t = tf_text.case_fold_utf8(t)
t = tf_text.normalize_utf8(t, 'NFD')
t = tf.regex_replace(t, '\\p{Mn}', '')
return t
normalized_tokens = _do_lower_case(normalized_tokens)
normalized_text = _do_lower_case(normalized_text)
wordpieces = tokenizer._wordpiece_tokenizer.tokenize(normalized_tokens)
orig_token_map = tf.ragged.range(orig_tokens.row_lengths())
orig_token_map = (tf.expand_dims(orig_token_map, 2) + tf.zeros_like(wordpieces))
wordpieces = wordpieces.merge_dims(1, 2)
orig_token_map = orig_token_map.merge_dims(1, 2)
return (orig_tokens, orig_token_map, wordpieces, normalized_text) | Tokenize with original mapping. | language/orqa/utils/bert_utils.py | tokenize_with_original_mapping | alsuhr-c/language | 1199 | python | def tokenize_with_original_mapping(text_input, tokenizer):
text_input = tf.regex_replace(text_input, '\\p{Cc}|\\p{Cf}', ' ')
orig_tokens = tf_text.regex_split(text_input, bert_tokenizer._DELIM_REGEX_PATTERN, tokenizer._basic_tokenizer._keep_delim_regex_pattern, 'BertBasicTokenizer')
normalized_tokens = orig_tokens
normalized_text = text_input
if tokenizer._basic_tokenizer._lower_case:
def _do_lower_case(t):
t = tf_text.case_fold_utf8(t)
t = tf_text.normalize_utf8(t, 'NFD')
t = tf.regex_replace(t, '\\p{Mn}', '')
return t
normalized_tokens = _do_lower_case(normalized_tokens)
normalized_text = _do_lower_case(normalized_text)
wordpieces = tokenizer._wordpiece_tokenizer.tokenize(normalized_tokens)
orig_token_map = tf.ragged.range(orig_tokens.row_lengths())
orig_token_map = (tf.expand_dims(orig_token_map, 2) + tf.zeros_like(wordpieces))
wordpieces = wordpieces.merge_dims(1, 2)
orig_token_map = orig_token_map.merge_dims(1, 2)
return (orig_tokens, orig_token_map, wordpieces, normalized_text) | def tokenize_with_original_mapping(text_input, tokenizer):
text_input = tf.regex_replace(text_input, '\\p{Cc}|\\p{Cf}', ' ')
orig_tokens = tf_text.regex_split(text_input, bert_tokenizer._DELIM_REGEX_PATTERN, tokenizer._basic_tokenizer._keep_delim_regex_pattern, 'BertBasicTokenizer')
normalized_tokens = orig_tokens
normalized_text = text_input
if tokenizer._basic_tokenizer._lower_case:
def _do_lower_case(t):
t = tf_text.case_fold_utf8(t)
t = tf_text.normalize_utf8(t, 'NFD')
t = tf.regex_replace(t, '\\p{Mn}', '')
return t
normalized_tokens = _do_lower_case(normalized_tokens)
normalized_text = _do_lower_case(normalized_text)
wordpieces = tokenizer._wordpiece_tokenizer.tokenize(normalized_tokens)
orig_token_map = tf.ragged.range(orig_tokens.row_lengths())
orig_token_map = (tf.expand_dims(orig_token_map, 2) + tf.zeros_like(wordpieces))
wordpieces = wordpieces.merge_dims(1, 2)
orig_token_map = orig_token_map.merge_dims(1, 2)
return (orig_tokens, orig_token_map, wordpieces, normalized_text)<|docstring|>Tokenize with original mapping.<|endoftext|> |
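Illustrative call (tokenizer as returned by get_tf_tokenizer above); `orig_token_map` carries, for each wordpiece, the index of the original token it came from, so a token split into two wordpieces contributes its index twice:

    orig_tokens, orig_map, wordpieces, normalized = tokenize_with_original_mapping(
        tf.constant(['Unaffable text']), tokenizer)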
796a8a8899ae1151b2e4739dce974be0435187f28111341a9815d184dec81ee5 | def pad_or_truncate_pair(token_ids_a, token_ids_b, sequence_length, cls_id, sep_id):
'Pad or truncate pair.'
token_ids_a = token_ids_a[:(sequence_length - 3)]
truncated_len_a = tf.size(token_ids_a)
maximum_len_b = tf.maximum(((sequence_length - 3) - truncated_len_a), 0)
token_ids_b = token_ids_b[:maximum_len_b]
truncated_len_b = tf.size(token_ids_b)
truncated_len_pair = (truncated_len_a + truncated_len_b)
padding = tf.zeros([((sequence_length - 3) - truncated_len_pair)], tf.int32)
token_ids = tf.concat([[cls_id], token_ids_a, [sep_id], token_ids_b, [sep_id], padding], 0)
mask = tf.concat([tf.ones([(truncated_len_pair + 3)], tf.int32), padding], 0)
segment_ids = tf.concat([tf.zeros([(truncated_len_a + 2)], tf.int32), tf.ones([(truncated_len_b + 1)], tf.int32), padding], 0)
token_ids = tf.ensure_shape(token_ids, [sequence_length])
mask = tf.ensure_shape(mask, [sequence_length])
segment_ids = tf.ensure_shape(segment_ids, [sequence_length])
return (token_ids, mask, segment_ids) | Pad or truncate pair. | language/orqa/utils/bert_utils.py | pad_or_truncate_pair | alsuhr-c/language | 1199 | python | def pad_or_truncate_pair(token_ids_a, token_ids_b, sequence_length, cls_id, sep_id):
token_ids_a = token_ids_a[:(sequence_length - 3)]
truncated_len_a = tf.size(token_ids_a)
maximum_len_b = tf.maximum(((sequence_length - 3) - truncated_len_a), 0)
token_ids_b = token_ids_b[:maximum_len_b]
truncated_len_b = tf.size(token_ids_b)
truncated_len_pair = (truncated_len_a + truncated_len_b)
padding = tf.zeros([((sequence_length - 3) - truncated_len_pair)], tf.int32)
token_ids = tf.concat([[cls_id], token_ids_a, [sep_id], token_ids_b, [sep_id], padding], 0)
mask = tf.concat([tf.ones([(truncated_len_pair + 3)], tf.int32), padding], 0)
segment_ids = tf.concat([tf.zeros([(truncated_len_a + 2)], tf.int32), tf.ones([(truncated_len_b + 1)], tf.int32), padding], 0)
token_ids = tf.ensure_shape(token_ids, [sequence_length])
mask = tf.ensure_shape(mask, [sequence_length])
segment_ids = tf.ensure_shape(segment_ids, [sequence_length])
return (token_ids, mask, segment_ids) | def pad_or_truncate_pair(token_ids_a, token_ids_b, sequence_length, cls_id, sep_id):
token_ids_a = token_ids_a[:(sequence_length - 3)]
truncated_len_a = tf.size(token_ids_a)
maximum_len_b = tf.maximum(((sequence_length - 3) - truncated_len_a), 0)
token_ids_b = token_ids_b[:maximum_len_b]
truncated_len_b = tf.size(token_ids_b)
truncated_len_pair = (truncated_len_a + truncated_len_b)
padding = tf.zeros([((sequence_length - 3) - truncated_len_pair)], tf.int32)
token_ids = tf.concat([[cls_id], token_ids_a, [sep_id], token_ids_b, [sep_id], padding], 0)
mask = tf.concat([tf.ones([(truncated_len_pair + 3)], tf.int32), padding], 0)
segment_ids = tf.concat([tf.zeros([(truncated_len_a + 2)], tf.int32), tf.ones([(truncated_len_b + 1)], tf.int32), padding], 0)
token_ids = tf.ensure_shape(token_ids, [sequence_length])
mask = tf.ensure_shape(mask, [sequence_length])
segment_ids = tf.ensure_shape(segment_ids, [sequence_length])
return (token_ids, mask, segment_ids)<|docstring|>Pad or truncate pair.<|endoftext|> |
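Worked example with BERT's conventional ids 101 (CLS) and 102 (SEP):

    ids, mask, segs = pad_or_truncate_pair(
        tf.constant([7, 8]), tf.constant([9, 10, 11]),
        sequence_length=8, cls_id=101, sep_id=102)
    # ids  -> [101, 7, 8, 102, 9, 10, 11, 102]
    # mask -> [1, 1, 1, 1, 1, 1, 1, 1]
    # segs -> [0, 0, 0, 0, 1, 1, 1, 1]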
bec452472200c9f2502f2397ea1e1780834f826cf31173a104d6e758111c0a47 | def colorize(string, color, bold=False, highlight=False):
'Colorize a string.\n\n This function was originally written by John Schulman.\n '
attr = []
num = color2num[color]
if highlight:
num += 10
attr.append(str(num))
if bold:
attr.append('1')
return ('\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)) | Colorize a string.
This function was originally written by John Schulman. | spinup_bis/utils/logx.py | colorize | piojanu/spinningup_tf2 | 19 | python | def colorize(string, color, bold=False, highlight=False):
'Colorize a string.\n\n This function was originally written by John Schulman.\n '
attr = []
num = color2num[color]
if highlight:
num += 10
attr.append(str(num))
if bold:
attr.append('1')
return ('\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)) | def colorize(string, color, bold=False, highlight=False):
'Colorize a string.\n\n This function was originally written by John Schulman.\n '
attr = []
num = color2num[color]
if highlight:
num += 10
attr.append(str(num))
if bold:
attr.append('1')
return ('\x1b[%sm%s\x1b[0m' % (';'.join(attr), string))<|docstring|>Colorize a string.
This function was originally written by John Schulman.<|endoftext|> |
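Sketch of the ANSI output, assuming the module-level color2num maps 'green' to 32:

    print(colorize('done', 'green', bold=True))
    # writes '\x1b[32;1mdone\x1b[0m', which most terminals render as bold green text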
fe1a4cc1abed9edd82a4f3e80a8bb4638fa50b5decb762fe26abc57156ca3e3c | def __init__(self, output_dir=None, output_fname='progress.txt', exp_name=None, neptune_kwargs=None):
'Initialize a Logger.\n\n Args:\n output_dir (string): A directory for saving results to. If\n ``None``, defaults to a temp directory of the form\n ``/tmp/experiments/somerandomnumber``.\n\n output_fname (string): Name for the tab-separated-value file\n containing metrics logged throughout a training run.\n Defaults to ``progress.txt``.\n\n exp_name (string): Experiment name. If you run multiple training\n runs and give them all the same ``exp_name``, the plotter\n will know to group them. (Use case: if you run the same\n hyperparameter configuration with multiple random seeds, you\n should give them all the same ``exp_name``.)\n\n neptune_kwargs (dict): Neptune init kwargs. If None, then Neptune\n logging is disabled.\n '
if (mpi_tools.proc_id() == 0):
self.output_dir = (output_dir or ('/tmp/experiments/%i' % int(time.time())))
if osp.exists(self.output_dir):
print(('Warning: Log dir %s already exists! Storing info there anyway.' % self.output_dir))
else:
os.makedirs(self.output_dir)
self.output_file = open(osp.join(self.output_dir, output_fname), 'w')
atexit.register(self.output_file.close)
print(colorize(('Logging data to %s' % self.output_file.name), 'green', bold=True))
if (neptune_kwargs is not None):
import neptune.new as neptune
self.neptune_run = neptune.init(**neptune_kwargs)
else:
self.neptune_run = None
else:
self.output_dir = None
self.output_file = None
self.neptune_run = None
self.first_row = True
self.log_headers = []
self.log_current_row = {}
self.exp_name = exp_name | Initialize a Logger.
Args:
output_dir (string): A directory for saving results to. If
``None``, defaults to a temp directory of the form
``/tmp/experiments/somerandomnumber``.
output_fname (string): Name for the tab-separated-value file
containing metrics logged throughout a training run.
Defaults to ``progress.txt``.
exp_name (string): Experiment name. If you run multiple training
runs and give them all the same ``exp_name``, the plotter
will know to group them. (Use case: if you run the same
hyperparameter configuration with multiple random seeds, you
should give them all the same ``exp_name``.)
neptune_kwargs (dict): Neptune init kwargs. If None, then Neptune
logging is disabled. | spinup_bis/utils/logx.py | __init__ | piojanu/spinningup_tf2 | 19 | python | def __init__(self, output_dir=None, output_fname='progress.txt', exp_name=None, neptune_kwargs=None):
'Initialize a Logger.\n\n Args:\n output_dir (string): A directory for saving results to. If\n ``None``, defaults to a temp directory of the form\n ``/tmp/experiments/somerandomnumber``.\n\n output_fname (string): Name for the tab-separated-value file\n containing metrics logged throughout a training run.\n Defaults to ``progress.txt``.\n\n exp_name (string): Experiment name. If you run multiple training\n runs and give them all the same ``exp_name``, the plotter\n will know to group them. (Use case: if you run the same\n hyperparameter configuration with multiple random seeds, you\n should give them all the same ``exp_name``.)\n\n neptune_kwargs (dict): Neptune init kwargs. If None, then Neptune\n logging is disabled.\n '
if (mpi_tools.proc_id() == 0):
self.output_dir = (output_dir or ('/tmp/experiments/%i' % int(time.time())))
if osp.exists(self.output_dir):
print(('Warning: Log dir %s already exists! Storing info there anyway.' % self.output_dir))
else:
os.makedirs(self.output_dir)
self.output_file = open(osp.join(self.output_dir, output_fname), 'w')
atexit.register(self.output_file.close)
print(colorize(('Logging data to %s' % self.output_file.name), 'green', bold=True))
if (neptune_kwargs is not None):
import neptune.new as neptune
self.neptune_run = neptune.init(**neptune_kwargs)
else:
self.neptune_run = None
else:
self.output_dir = None
self.output_file = None
self.neptune_run = None
self.first_row = True
self.log_headers = []
self.log_current_row = {}
self.exp_name = exp_name | def __init__(self, output_dir=None, output_fname='progress.txt', exp_name=None, neptune_kwargs=None):
'Initialize a Logger.\n\n Args:\n output_dir (string): A directory for saving results to. If\n ``None``, defaults to a temp directory of the form\n ``/tmp/experiments/somerandomnumber``.\n\n output_fname (string): Name for the tab-separated-value file\n containing metrics logged throughout a training run.\n Defaults to ``progress.txt``.\n\n exp_name (string): Experiment name. If you run multiple training\n runs and give them all the same ``exp_name``, the plotter\n will know to group them. (Use case: if you run the same\n hyperparameter configuration with multiple random seeds, you\n should give them all the same ``exp_name``.)\n\n neptune_kwargs (dict): Neptune init kwargs. If None, then Neptune\n logging is disabled.\n '
if (mpi_tools.proc_id() == 0):
self.output_dir = (output_dir or ('/tmp/experiments/%i' % int(time.time())))
if osp.exists(self.output_dir):
print(('Warning: Log dir %s already exists! Storing info there anyway.' % self.output_dir))
else:
os.makedirs(self.output_dir)
self.output_file = open(osp.join(self.output_dir, output_fname), 'w')
atexit.register(self.output_file.close)
print(colorize(('Logging data to %s' % self.output_file.name), 'green', bold=True))
if (neptune_kwargs is not None):
import neptune.new as neptune
self.neptune_run = neptune.init(**neptune_kwargs)
else:
self.neptune_run = None
else:
self.output_dir = None
self.output_file = None
self.neptune_run = None
self.first_row = True
self.log_headers = []
self.log_current_row = {}
self.exp_name = exp_name<|docstring|>Initialize a Logger.
Args:
output_dir (string): A directory for saving results to. If
``None``, defaults to a temp directory of the form
``/tmp/experiments/somerandomnumber``.
output_fname (string): Name for the tab-separated-value file
containing metrics logged throughout a training run.
Defaults to ``progress.txt``.
exp_name (string): Experiment name. If you run multiple training
runs and give them all the same ``exp_name``, the plotter
will know to group them. (Use case: if you run the same
hyperparameter configuration with multiple random seeds, you
should give them all the same ``exp_name``.)
neptune_kwargs (dict): Neptune init kwargs. If None, then Neptune
logging is disabled.<|endoftext|> |
3311710c2d0704d902e32d752bc5c44cbe8f27ee0c83b47203d8453e307a9c5d | def log(self, msg, color='green'):
'Print a colorized message to stdout.'
if (mpi_tools.proc_id() == 0):
print(colorize(msg, color, bold=True)) | Print a colorized message to stdout. | spinup_bis/utils/logx.py | log | piojanu/spinningup_tf2 | 19 | python | def log(self, msg, color='green'):
if (mpi_tools.proc_id() == 0):
print(colorize(msg, color, bold=True)) | def log(self, msg, color='green'):
if (mpi_tools.proc_id() == 0):
print(colorize(msg, color, bold=True))<|docstring|>Print a colorized message to stdout.<|endoftext|> |
cace6cbc8ec870e48e5f700eabe8b0d2d35de2faa358de793d152a40e298524e | def log_tabular(self, key, val):
'Log a value of some diagnostic.\n\n Call this only once for each diagnostic quantity, each iteration.\n After using ``log_tabular`` to store values for each diagnostic,\n make sure to call ``dump_tabular`` to write them out to file and\n stdout (otherwise they will not get saved anywhere).\n '
if self.first_row:
self.log_headers.append(key)
else:
assert (key in self.log_headers), ("Trying to introduce a new key %s that you didn't include in the first iteration" % key)
assert (key not in self.log_current_row), ('You already set %s this iteration. Maybe you forgot to call dump_tabular()' % key)
self.log_current_row[key] = val | Log a value of some diagnostic.
Call this only once for each diagnostic quantity, each iteration.
After using ``log_tabular`` to store values for each diagnostic,
make sure to call ``dump_tabular`` to write them out to file and
stdout (otherwise they will not get saved anywhere). | spinup_bis/utils/logx.py | log_tabular | piojanu/spinningup_tf2 | 19 | python | def log_tabular(self, key, val):
'Log a value of some diagnostic.\n\n Call this only once for each diagnostic quantity, each iteration.\n After using ``log_tabular`` to store values for each diagnostic,\n make sure to call ``dump_tabular`` to write them out to file and\n stdout (otherwise they will not get saved anywhere).\n '
if self.first_row:
self.log_headers.append(key)
else:
assert (key in self.log_headers), ("Trying to introduce a new key %s that you didn't include in the first iteration" % key)
assert (key not in self.log_current_row), ('You already set %s this iteration. Maybe you forgot to call dump_tabular()' % key)
self.log_current_row[key] = val | def log_tabular(self, key, val):
'Log a value of some diagnostic.\n\n Call this only once for each diagnostic quantity, each iteration.\n After using ``log_tabular`` to store values for each diagnostic,\n make sure to call ``dump_tabular`` to write them out to file and\n stdout (otherwise they will not get saved anywhere).\n '
if self.first_row:
self.log_headers.append(key)
else:
assert (key in self.log_headers), ("Trying to introduce a new key %s that you didn't include in the first iteration" % key)
assert (key not in self.log_current_row), ('You already set %s this iteration. Maybe you forgot to call dump_tabular()' % key)
self.log_current_row[key] = val<|docstring|>Log a value of some diagnostic.
Call this only once for each diagnostic quantity, each iteration.
After using ``log_tabular`` to store values for each diagnostic,
make sure to call ``dump_tabular`` to write them out to file and
stdout (otherwise they will not get saved anywhere).<|endoftext|> |
0a69f961986feaf00b41205f9fe0810fdb04d1db1d90be4b6d34d148491aaa08 | def save_config(self, config):
"Log an experiment configuration.\n\n Call this once at the top of your experiment, passing in all important\n config vars as a dict. This will serialize the config to JSON, while\n handling anything which can't be serialized in a graceful way (writing\n as informative a string as possible).\n\n Example use:\n\n .. code-block:: python\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n "
config_json = serialization_utils.convert_json(config)
if (self.exp_name is not None):
config_json['exp_name'] = self.exp_name
if (mpi_tools.proc_id() == 0):
output = json.dumps(config_json, separators=(',', ':\t'), indent=4, sort_keys=True)
print(colorize('Saving config:\n', color='cyan', bold=True))
print(output)
with open(osp.join(self.output_dir, 'config.json'), 'w') as out:
out.write(output)
if (self.neptune_run is not None):
print(colorize('Saving config to Neptune...\n', color='cyan'))
self.neptune_run['parameters'] = config_json | Log an experiment configuration.
Call this once at the top of your experiment, passing in all important
config vars as a dict. This will serialize the config to JSON, while
handling anything which can't be serialized in a graceful way (writing
as informative a string as possible).
Example use:
.. code-block:: python
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals()) | spinup_bis/utils/logx.py | save_config | piojanu/spinningup_tf2 | 19 | python | def save_config(self, config):
"Log an experiment configuration.\n\n Call this once at the top of your experiment, passing in all important\n config vars as a dict. This will serialize the config to JSON, while\n handling anything which can't be serialized in a graceful way (writing\n as informative a string as possible).\n\n Example use:\n\n .. code-block:: python\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n "
config_json = serialization_utils.convert_json(config)
if (self.exp_name is not None):
config_json['exp_name'] = self.exp_name
if (mpi_tools.proc_id() == 0):
output = json.dumps(config_json, separators=(',', ':\t'), indent=4, sort_keys=True)
print(colorize('Saving config:\n', color='cyan', bold=True))
print(output)
with open(osp.join(self.output_dir, 'config.json'), 'w') as out:
out.write(output)
if (self.neptune_run is not None):
print(colorize('Saving config to Neptune...\n', color='cyan'))
self.neptune_run['parameters'] = config_json | def save_config(self, config):
"Log an experiment configuration.\n\n Call this once at the top of your experiment, passing in all important\n config vars as a dict. This will serialize the config to JSON, while\n handling anything which can't be serialized in a graceful way (writing\n as informative a string as possible).\n\n Example use:\n\n .. code-block:: python\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n "
config_json = serialization_utils.convert_json(config)
if (self.exp_name is not None):
config_json['exp_name'] = self.exp_name
if (mpi_tools.proc_id() == 0):
output = json.dumps(config_json, separators=(',', ':\t'), indent=4, sort_keys=True)
print(colorize('Saving config:\n', color='cyan', bold=True))
print(output)
with open(osp.join(self.output_dir, 'config.json'), 'w') as out:
out.write(output)
if (self.neptune_run is not None):
print(colorize('Saving config to Neptune...\n', color='cyan'))
self.neptune_run['parameters'] = config_json<|docstring|>Log an experiment configuration.
Call this once at the top of your experiment, passing in all important
config vars as a dict. This will serialize the config to JSON, while
handling anything which can't be serialized in a graceful way (writing
as informative a string as possible).
Example use:
.. code-block:: python
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())<|endoftext|> |
6fb21d06ce94d73cf8ea036850936ac91ea2e6eefb867b0296841740bc61dbb7 | def dump_tabular(self):
'Write all of the diagnostics from the current iteration.\n\n Writes both to stdout, and to the output file.\n '
if (mpi_tools.proc_id() == 0):
vals = []
key_lens = [len(key) for key in self.log_headers]
max_key_len = max(15, max(key_lens))
keystr = ('%' + ('%d' % max_key_len))
fmt = (('| ' + keystr) + 's | %15s |')
n_slashes = (22 + max_key_len)
print(('-' * n_slashes))
for key in self.log_headers:
val = self.log_current_row.get(key, '')
valstr = (('%8.3g' % val) if hasattr(val, '__float__') else val)
print((fmt % (key, valstr)))
vals.append(val)
if (self.neptune_run is not None):
step = self.log_current_row.get('TotalEnvInteracts')
if ('Test' in key):
nkey = ('test/' + key)
else:
nkey = ('train/' + key)
self.neptune_run[nkey].log(val, step)
print(('-' * n_slashes), flush=True)
if (self.output_file is not None):
if self.first_row:
self.output_file.write(('\t'.join(self.log_headers) + '\n'))
self.output_file.write(('\t'.join(map(str, vals)) + '\n'))
self.output_file.flush()
self.log_current_row.clear()
self.first_row = False | Write all of the diagnostics from the current iteration.
Writes both to stdout, and to the output file. | spinup_bis/utils/logx.py | dump_tabular | piojanu/spinningup_tf2 | 19 | python | def dump_tabular(self):
'Write all of the diagnostics from the current iteration.\n\n Writes both to stdout, and to the output file.\n '
if (mpi_tools.proc_id() == 0):
vals = []
key_lens = [len(key) for key in self.log_headers]
max_key_len = max(15, max(key_lens))
keystr = ('%' + ('%d' % max_key_len))
fmt = (('| ' + keystr) + 's | %15s |')
n_slashes = (22 + max_key_len)
print(('-' * n_slashes))
for key in self.log_headers:
val = self.log_current_row.get(key, '')
valstr = (('%8.3g' % val) if hasattr(val, '__float__') else val)
print((fmt % (key, valstr)))
vals.append(val)
if (self.neptune_run is not None):
step = self.log_current_row.get('TotalEnvInteracts')
if ('Test' in key):
nkey = ('test/' + key)
else:
nkey = ('train/' + key)
self.neptune_run[nkey].log(val, step)
print(('-' * n_slashes), flush=True)
if (self.output_file is not None):
if self.first_row:
self.output_file.write(('\t'.join(self.log_headers) + '\n'))
self.output_file.write(('\t'.join(map(str, vals)) + '\n'))
self.output_file.flush()
self.log_current_row.clear()
self.first_row = False | def dump_tabular(self):
'Write all of the diagnostics from the current iteration.\n\n Writes both to stdout, and to the output file.\n '
if (mpi_tools.proc_id() == 0):
vals = []
key_lens = [len(key) for key in self.log_headers]
max_key_len = max(15, max(key_lens))
keystr = ('%' + ('%d' % max_key_len))
fmt = (('| ' + keystr) + 's | %15s |')
n_slashes = (22 + max_key_len)
print(('-' * n_slashes))
for key in self.log_headers:
val = self.log_current_row.get(key, '')
valstr = (('%8.3g' % val) if hasattr(val, '__float__') else val)
print((fmt % (key, valstr)))
vals.append(val)
if (self.neptune_run is not None):
step = self.log_current_row.get('TotalEnvInteracts')
if ('Test' in key):
nkey = ('test/' + key)
else:
nkey = ('train/' + key)
self.neptune_run[nkey].log(val, step)
print(('-' * n_slashes), flush=True)
if (self.output_file is not None):
if self.first_row:
self.output_file.write(('\t'.join(self.log_headers) + '\n'))
self.output_file.write(('\t'.join(map(str, vals)) + '\n'))
self.output_file.flush()
self.log_current_row.clear()
self.first_row = False<|docstring|>Write all of the diagnostics from the current iteration.
Writes both to stdout, and to the output file.<|endoftext|> |
137d4ee9648040386ff945737e9310ea83d0f6f9eec5c0d7f919d0beb768b421 | def store(self, **kwargs):
"Save something into the epoch_logger's current state.\n\n Provide an arbitrary number of keyword arguments with numerical\n values.\n "
for (k, v) in kwargs.items():
if (k not in self.epoch_dict.keys()):
self.epoch_dict[k] = []
self.epoch_dict[k].append(v) | Save something into the epoch_logger's current state.
Provide an arbitrary number of keyword arguments with numerical
values. | spinup_bis/utils/logx.py | store | piojanu/spinningup_tf2 | 19 | python | def store(self, **kwargs):
"Save something into the epoch_logger's current state.\n\n Provide an arbitrary number of keyword arguments with numerical\n values.\n "
for (k, v) in kwargs.items():
if (k not in self.epoch_dict.keys()):
self.epoch_dict[k] = []
self.epoch_dict[k].append(v) | def store(self, **kwargs):
"Save something into the epoch_logger's current state.\n\n Provide an arbitrary number of keyword arguments with numerical\n values.\n "
for (k, v) in kwargs.items():
if (k not in self.epoch_dict.keys()):
self.epoch_dict[k] = []
self.epoch_dict[k].append(v)<|docstring|>Save something into the epoch_logger's current state.
Provide an arbitrary number of keyword arguments with numerical
values.<|endoftext|> |
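Editor's example (not part of the dataset): a minimal, self-contained stand-in reproducing the store() semantics recorded above, so the per-key accumulation can be checked without installing the package.

class _StoreDemo:
    def __init__(self):
        self.epoch_dict = {}

    def store(self, **kwargs):  # same behaviour as the record above
        for k, v in kwargs.items():
            self.epoch_dict.setdefault(k, []).append(v)

s = _StoreDemo()
s.store(EpRet=12.5, EpLen=200)
s.store(EpRet=9.1, EpLen=180)
assert s.epoch_dict['EpRet'] == [12.5, 9.1]  # values accumulate per key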
a19217b2850500ea4ef62b3d1bba97a54bda996e0af64e976b6a364d17c4771f | def log_tabular(self, key, val=None, with_min_and_max=False, average_only=False):
'Log a value or possibly the mean/std/min/max values of a diagnostic.\n\n Args:\n key (string): The name of the diagnostic. If you are logging a\n diagnostic whose state has previously been saved with\n ``store``, the key here has to match the key you used there.\n\n val: A value for the diagnostic. If you have previously saved\n values for this key via ``store``, do *not* provide a ``val``\n here.\n\n with_min_and_max (bool): If true, log min and max values of the\n diagnostic over the epoch.\n\n average_only (bool): If true, do not log the standard deviation\n of the diagnostic over the epoch.\n '
if (val is not None):
super().log_tabular(key, val)
else:
v = self.epoch_dict[key]
vals = (np.concatenate(v) if (isinstance(v[0], np.ndarray) and (len(v[0].shape) > 0)) else v)
stats = mpi_tools.mpi_statistics_scalar(vals, with_min_and_max=with_min_and_max)
super().log_tabular((key if average_only else ('Average' + key)), stats[0])
if (not average_only):
super().log_tabular(('Std' + key), stats[1])
if with_min_and_max:
super().log_tabular(('Max' + key), stats[3])
super().log_tabular(('Min' + key), stats[2])
self.epoch_dict[key] = [] | Log a value or possibly the mean/std/min/max values of a diagnostic.
Args:
key (string): The name of the diagnostic. If you are logging a
diagnostic whose state has previously been saved with
``store``, the key here has to match the key you used there.
val: A value for the diagnostic. If you have previously saved
values for this key via ``store``, do *not* provide a ``val``
here.
with_min_and_max (bool): If true, log min and max values of the
diagnostic over the epoch.
average_only (bool): If true, do not log the standard deviation
of the diagnostic over the epoch. | spinup_bis/utils/logx.py | log_tabular | piojanu/spinningup_tf2 | 19 | python | def log_tabular(self, key, val=None, with_min_and_max=False, average_only=False):
'Log a value or possibly the mean/std/min/max values of a diagnostic.\n\n Args:\n key (string): The name of the diagnostic. If you are logging a\n diagnostic whose state has previously been saved with\n ``store``, the key here has to match the key you used there.\n\n val: A value for the diagnostic. If you have previously saved\n values for this key via ``store``, do *not* provide a ``val``\n here.\n\n with_min_and_max (bool): If true, log min and max values of the\n diagnostic over the epoch.\n\n average_only (bool): If true, do not log the standard deviation\n of the diagnostic over the epoch.\n '
if (val is not None):
super().log_tabular(key, val)
else:
v = self.epoch_dict[key]
vals = (np.concatenate(v) if (isinstance(v[0], np.ndarray) and (len(v[0].shape) > 0)) else v)
stats = mpi_tools.mpi_statistics_scalar(vals, with_min_and_max=with_min_and_max)
super().log_tabular((key if average_only else ('Average' + key)), stats[0])
if (not average_only):
super().log_tabular(('Std' + key), stats[1])
if with_min_and_max:
super().log_tabular(('Max' + key), stats[3])
super().log_tabular(('Min' + key), stats[2])
self.epoch_dict[key] = [] | def log_tabular(self, key, val=None, with_min_and_max=False, average_only=False):
'Log a value or possibly the mean/std/min/max values of a diagnostic.\n\n Args:\n key (string): The name of the diagnostic. If you are logging a\n diagnostic whose state has previously been saved with\n ``store``, the key here has to match the key you used there.\n\n val: A value for the diagnostic. If you have previously saved\n values for this key via ``store``, do *not* provide a ``val``\n here.\n\n with_min_and_max (bool): If true, log min and max values of the\n diagnostic over the epoch.\n\n average_only (bool): If true, do not log the standard deviation\n of the diagnostic over the epoch.\n '
if (val is not None):
super().log_tabular(key, val)
else:
v = self.epoch_dict[key]
vals = (np.concatenate(v) if (isinstance(v[0], np.ndarray) and (len(v[0].shape) > 0)) else v)
stats = mpi_tools.mpi_statistics_scalar(vals, with_min_and_max=with_min_and_max)
super().log_tabular((key if average_only else ('Average' + key)), stats[0])
if (not average_only):
super().log_tabular(('Std' + key), stats[1])
if with_min_and_max:
super().log_tabular(('Max' + key), stats[3])
super().log_tabular(('Min' + key), stats[2])
self.epoch_dict[key] = []<|docstring|>Log a value or possibly the mean/std/min/max values of a diagnostic.
Args:
key (string): The name of the diagnostic. If you are logging a
diagnostic whose state has previously been saved with
``store``, the key here has to match the key you used there.
val: A value for the diagnostic. If you have previously saved
values for this key via ``store``, do *not* provide a ``val``
here.
with_min_and_max (bool): If true, log min and max values of the
diagnostic over the epoch.
average_only (bool): If true, do not log the standard deviation
of the diagnostic over the epoch.<|endoftext|> |
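Editor's example (not part of the dataset): a hedged sketch of how store(), log_tabular() and dump_tabular() from the three logger records above are typically combined at the end of an epoch; `logger`, `ret` and `t` are assumed to exist in the surrounding training loop.

logger.store(EpRet=ret)                             # called many times during the epoch
logger.log_tabular('EpRet', with_min_and_max=True)  # emits Average/Std/Max/MinEpRet
logger.log_tabular('TotalEnvInteracts', t)          # a direct value, no statistics
logger.dump_tabular()                               # print the table, append the tsv row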
e3584e7537f54406bc693a061160327936ddf81c1a5d36de9c0e4c3128892a9c | def test_markdown_page(client: Client) -> None:
'Test markdown rendering.'
response = client.get(reverse('about_test'))
assert (response.status_code == 200)
assert ('<h3>ReproHack History</h3>' in response.content.decode()) | Test markdown rendering. | reprohack_hub/reprohack/tests/test_views.py | test_markdown_page | Joe-Heffer-Shef/reprohack_site | 0 | python | def test_markdown_page(client: Client) -> None:
response = client.get(reverse('about_test'))
assert (response.status_code == 200)
assert ('<h3>ReproHack History</h3>' in response.content.decode()) | def test_markdown_page(client: Client) -> None:
response = client.get(reverse('about_test'))
assert (response.status_code == 200)
assert ('<h3>ReproHack History</h3>' in response.content.decode())<|docstring|>Test markdown rendering.<|endoftext|> |
4a2e6f658956fcf717c1883fea175ffe8b8c37d95efa56adb87ce7d32dbd1818 | def test_create_review(client: Client, user: User, review: Review) -> None:
'Test creating a review.'
assert (user not in review.reviewers.all())
review_dict = model_to_dict(review)
client.force_login(user)
response = client.post(reverse('review_new'), review_dict, follow=True)
assert (response.status_code == 200)
rendered_response = response.render()
assert (review.paper.title in rendered_response.content.decode())
assert (user in review.paper.review_set.last().reviewers.all())
assert (user not in review.reviewers.all()) | Test creating a review. | reprohack_hub/reprohack/tests/test_views.py | test_create_review | Joe-Heffer-Shef/reprohack_site | 0 | python | def test_create_review(client: Client, user: User, review: Review) -> None:
assert (user not in review.reviewers.all())
review_dict = model_to_dict(review)
client.force_login(user)
response = client.post(reverse('review_new'), review_dict, follow=True)
assert (response.status_code == 200)
rendered_response = response.render()
assert (review.paper.title in rendered_response.content.decode())
assert (user in review.paper.review_set.last().reviewers.all())
assert (user not in review.reviewers.all()) | def test_create_review(client: Client, user: User, review: Review) -> None:
assert (user not in review.reviewers.all())
review_dict = model_to_dict(review)
client.force_login(user)
response = client.post(reverse('review_new'), review_dict, follow=True)
assert (response.status_code == 200)
rendered_response = response.render()
assert (review.paper.title in rendered_response.content.decode())
assert (user in review.paper.review_set.last().reviewers.all())
assert (user not in review.reviewers.all())<|docstring|>Test creating a review.<|endoftext|> |
91d0732ea4c346d2dd91804eec23d954602e8a5b614236470547343dc59fcc9f | def display_page(self, number: Optional[int]=None) -> None:
'Update page content and current page number, if possible.'
img_lines = self.model.get_page_content(target_size=self.screen_size, number=number)
if (img_lines is not None):
self.view.set_page_content(img_lines)
if (number is not None):
self.model.current_page_number = number
self.view.set_title_pagecount(number)
self.model.page_region = None | Update page content and current page number, if possible. | pdftty/controller.py | display_page | kpj/pdftty | 1 | python | def display_page(self, number: Optional[int]=None) -> None:
img_lines = self.model.get_page_content(target_size=self.screen_size, number=number)
if (img_lines is not None):
self.view.set_page_content(img_lines)
if (number is not None):
self.model.current_page_number = number
self.view.set_title_pagecount(number)
self.model.page_region = None | def display_page(self, number: Optional[int]=None) -> None:
img_lines = self.model.get_page_content(target_size=self.screen_size, number=number)
if (img_lines is not None):
self.view.set_page_content(img_lines)
if (number is not None):
self.model.current_page_number = number
self.view.set_title_pagecount(number)
self.model.page_region = None<|docstring|>Update page content and current page number, if possible.<|endoftext|> |
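Editor's example (not part of the dataset): hypothetical next/previous-page handlers built on the display_page record above; the attribute names follow the record, the zero-based clamp is an assumption.

def next_page(controller):
    controller.display_page(number=controller.model.current_page_number + 1)

def prev_page(controller):
    controller.display_page(number=max(0, controller.model.current_page_number - 1))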
77b6b8337787f5961b97b51a3a1779802f18a9df9d3db1617cb9ac52a9f9a1ed | async def test_setup(hass: HomeAssistant, fritz: Mock):
'Test setup of platform.'
device = FritzDeviceBinarySensorMock()
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
state = hass.states.get(f'{ENTITY_ID}_alarm')
assert state
assert (state.state == STATE_ON)
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Alarm')
assert (state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.WINDOW)
assert (ATTR_STATE_CLASS not in state.attributes)
state = hass.states.get(f'{ENTITY_ID}_button_lock_on_device')
assert state
assert (state.state == STATE_OFF)
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Button Lock on Device')
assert (state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.LOCK)
assert (ATTR_STATE_CLASS not in state.attributes)
state = hass.states.get(f'{ENTITY_ID}_button_lock_via_ui')
assert state
assert (state.state == STATE_OFF)
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Button Lock via UI')
assert (state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.LOCK)
assert (ATTR_STATE_CLASS not in state.attributes)
state = hass.states.get(f'{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_battery')
assert state
assert (state.state == '23')
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Battery')
assert (state.attributes[ATTR_UNIT_OF_MEASUREMENT] == PERCENTAGE)
assert (ATTR_STATE_CLASS not in state.attributes) | Test setup of platform. | tests/components/fritzbox/test_binary_sensor.py | test_setup | GrandMoff100/homeassistant-core | 30,023 | python | async def test_setup(hass: HomeAssistant, fritz: Mock):
device = FritzDeviceBinarySensorMock()
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
state = hass.states.get(f'{ENTITY_ID}_alarm')
assert state
assert (state.state == STATE_ON)
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Alarm')
assert (state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.WINDOW)
assert (ATTR_STATE_CLASS not in state.attributes)
state = hass.states.get(f'{ENTITY_ID}_button_lock_on_device')
assert state
assert (state.state == STATE_OFF)
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Button Lock on Device')
assert (state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.LOCK)
assert (ATTR_STATE_CLASS not in state.attributes)
state = hass.states.get(f'{ENTITY_ID}_button_lock_via_ui')
assert state
assert (state.state == STATE_OFF)
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Button Lock via UI')
assert (state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.LOCK)
assert (ATTR_STATE_CLASS not in state.attributes)
state = hass.states.get(f'{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_battery')
assert state
assert (state.state == '23')
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Battery')
assert (state.attributes[ATTR_UNIT_OF_MEASUREMENT] == PERCENTAGE)
assert (ATTR_STATE_CLASS not in state.attributes) | async def test_setup(hass: HomeAssistant, fritz: Mock):
device = FritzDeviceBinarySensorMock()
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
state = hass.states.get(f'{ENTITY_ID}_alarm')
assert state
assert (state.state == STATE_ON)
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Alarm')
assert (state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.WINDOW)
assert (ATTR_STATE_CLASS not in state.attributes)
state = hass.states.get(f'{ENTITY_ID}_button_lock_on_device')
assert state
assert (state.state == STATE_OFF)
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Button Lock on Device')
assert (state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.LOCK)
assert (ATTR_STATE_CLASS not in state.attributes)
state = hass.states.get(f'{ENTITY_ID}_button_lock_via_ui')
assert state
assert (state.state == STATE_OFF)
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Button Lock via UI')
assert (state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.LOCK)
assert (ATTR_STATE_CLASS not in state.attributes)
state = hass.states.get(f'{SENSOR_DOMAIN}.{CONF_FAKE_NAME}_battery')
assert state
assert (state.state == '23')
assert (state.attributes[ATTR_FRIENDLY_NAME] == f'{CONF_FAKE_NAME} Battery')
assert (state.attributes[ATTR_UNIT_OF_MEASUREMENT] == PERCENTAGE)
assert (ATTR_STATE_CLASS not in state.attributes)<|docstring|>Test setup of platform.<|endoftext|> |
820a0650b07bc47681ad2a749bc411407b0fd8bb0a4186bba6f0f78dbbfa6c63 | async def test_is_off(hass: HomeAssistant, fritz: Mock):
'Test state of platform.'
device = FritzDeviceBinarySensorMock()
device.present = False
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
state = hass.states.get(f'{ENTITY_ID}_alarm')
assert state
assert (state.state == STATE_UNAVAILABLE)
state = hass.states.get(f'{ENTITY_ID}_button_lock_on_device')
assert state
assert (state.state == STATE_UNAVAILABLE)
state = hass.states.get(f'{ENTITY_ID}_button_lock_via_ui')
assert state
assert (state.state == STATE_UNAVAILABLE) | Test state of platform. | tests/components/fritzbox/test_binary_sensor.py | test_is_off | GrandMoff100/homeassistant-core | 30,023 | python | async def test_is_off(hass: HomeAssistant, fritz: Mock):
device = FritzDeviceBinarySensorMock()
device.present = False
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
state = hass.states.get(f'{ENTITY_ID}_alarm')
assert state
assert (state.state == STATE_UNAVAILABLE)
state = hass.states.get(f'{ENTITY_ID}_button_lock_on_device')
assert state
assert (state.state == STATE_UNAVAILABLE)
state = hass.states.get(f'{ENTITY_ID}_button_lock_via_ui')
assert state
assert (state.state == STATE_UNAVAILABLE) | async def test_is_off(hass: HomeAssistant, fritz: Mock):
device = FritzDeviceBinarySensorMock()
device.present = False
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
state = hass.states.get(f'{ENTITY_ID}_alarm')
assert state
assert (state.state == STATE_UNAVAILABLE)
state = hass.states.get(f'{ENTITY_ID}_button_lock_on_device')
assert state
assert (state.state == STATE_UNAVAILABLE)
state = hass.states.get(f'{ENTITY_ID}_button_lock_via_ui')
assert state
assert (state.state == STATE_UNAVAILABLE)<|docstring|>Test state of platform.<|endoftext|> |
63a87277e56a4ad8f0ff769916cedd05399b9cd58c494f980ad25e4fe649f54a | async def test_update(hass: HomeAssistant, fritz: Mock):
'Test update without error.'
device = FritzDeviceBinarySensorMock()
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
assert (fritz().update_devices.call_count == 1)
assert (fritz().login.call_count == 1)
next_update = (dt_util.utcnow() + timedelta(seconds=200))
async_fire_time_changed(hass, next_update)
(await hass.async_block_till_done())
assert (fritz().update_devices.call_count == 2)
assert (fritz().login.call_count == 1) | Test update without error. | tests/components/fritzbox/test_binary_sensor.py | test_update | GrandMoff100/homeassistant-core | 30,023 | python | async def test_update(hass: HomeAssistant, fritz: Mock):
device = FritzDeviceBinarySensorMock()
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
assert (fritz().update_devices.call_count == 1)
assert (fritz().login.call_count == 1)
next_update = (dt_util.utcnow() + timedelta(seconds=200))
async_fire_time_changed(hass, next_update)
(await hass.async_block_till_done())
assert (fritz().update_devices.call_count == 2)
assert (fritz().login.call_count == 1) | async def test_update(hass: HomeAssistant, fritz: Mock):
device = FritzDeviceBinarySensorMock()
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
assert (fritz().update_devices.call_count == 1)
assert (fritz().login.call_count == 1)
next_update = (dt_util.utcnow() + timedelta(seconds=200))
async_fire_time_changed(hass, next_update)
(await hass.async_block_till_done())
assert (fritz().update_devices.call_count == 2)
assert (fritz().login.call_count == 1)<|docstring|>Test update without error.<|endoftext|> |
015d0245ec854c10897fc5827ba000b1c5152dab74227d7284ac28104edcb007 | async def test_update_error(hass: HomeAssistant, fritz: Mock):
'Test update with error.'
device = FritzDeviceBinarySensorMock()
device.update.side_effect = [mock.DEFAULT, HTTPError('Boom')]
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
assert (fritz().update_devices.call_count == 1)
assert (fritz().login.call_count == 1)
next_update = (dt_util.utcnow() + timedelta(seconds=200))
async_fire_time_changed(hass, next_update)
(await hass.async_block_till_done())
assert (fritz().update_devices.call_count == 2)
assert (fritz().login.call_count == 1) | Test update with error. | tests/components/fritzbox/test_binary_sensor.py | test_update_error | GrandMoff100/homeassistant-core | 30,023 | python | async def test_update_error(hass: HomeAssistant, fritz: Mock):
device = FritzDeviceBinarySensorMock()
device.update.side_effect = [mock.DEFAULT, HTTPError('Boom')]
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
assert (fritz().update_devices.call_count == 1)
assert (fritz().login.call_count == 1)
next_update = (dt_util.utcnow() + timedelta(seconds=200))
async_fire_time_changed(hass, next_update)
(await hass.async_block_till_done())
assert (fritz().update_devices.call_count == 2)
assert (fritz().login.call_count == 1) | async def test_update_error(hass: HomeAssistant, fritz: Mock):
device = FritzDeviceBinarySensorMock()
device.update.side_effect = [mock.DEFAULT, HTTPError('Boom')]
assert (await setup_config_entry(hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz))
assert (fritz().update_devices.call_count == 1)
assert (fritz().login.call_count == 1)
next_update = (dt_util.utcnow() + timedelta(seconds=200))
async_fire_time_changed(hass, next_update)
(await hass.async_block_till_done())
assert (fritz().update_devices.call_count == 2)
assert (fritz().login.call_count == 1)<|docstring|>Test update with error.<|endoftext|> |
2118b85026cacd949efdab79e276a5616b9e73efeb2f34076095cf2db26e3a1d | def is_member(user: User) -> bool:
'\n Checks whether the given user is a member.\n A member must have a name starting with "L".\n :param user: User\n :return: bool\n '
if (not user):
raise TypeError('user should not be None')
return user.name.startswith('L') | Checks whether the given user is a member.
A member must have a name starting with "L".
:param user: User
:return: bool | Testing/unit_test/pytest_for_python/src/codes.py | is_member | Ziang-Lu/Software-Development-and-Design | 1 | python | def is_member(user: User) -> bool:
'\n Checks whether the given user is a member.\n A member must have a name starting with "L".\n :param user: User\n :return: bool\n '
if (not user):
raise TypeError('user should not be None')
return user.name.startswith('L') | def is_member(user: User) -> bool:
'\n Checks whether the given user is a member.\n A member must have a name starting with "L".\n :param user: User\n :return: bool\n '
if (not user):
raise TypeError('user should not be None')
return user.name.startswith('L')<|docstring|>Checks whether the given user is a member.
A member must have a name starting with "L".
:param user: User
:return: bool<|endoftext|> |
fa5455518707250e0922f379e1315a7a6a2b14c44e8ff5b48a037b5e8ab08ac4 | def is_prime_member(user: User) -> bool:
'\n Checks whether the given user is a prime member.\n A prime member must have a name starting with "W".\n :param user: User\n :return: bool\n '
if (not user):
raise TypeError('user should not be None')
return user.name.startswith('W') | Checks whether the given user is a prime member.
A prime member must have a name starting with "W".
:param user: User
:return: bool | Testing/unit_test/pytest_for_python/src/codes.py | is_prime_member | Ziang-Lu/Software-Development-and-Design | 1 | python | def is_prime_member(user: User) -> bool:
'\n Checks whether the given user is a prime member.\n A prime member must have a name starting with "W".\n :param user: User\n :return: bool\n '
if (not user):
raise TypeError('user should not be None')
return user.name.startswith('W') | def is_prime_member(user: User) -> bool:
'\n Checks whether the given user is a prime member.\n A prime member must have a name starting with "W".\n :param user: User\n :return: bool\n '
if (not user):
raise TypeError('user should not be None')
return user.name.startswith('W')<|docstring|>Checks whether the given user is a prime member.
A prime member must have a name starting with "W".
:param user: User
:return: bool<|endoftext|> |
b928209e039133bb56d79f41b1149fa5f61e133e79576cebe6e26bb378969ef4 | def __init__(self, name: str, pwd: str):
'\n Constructor with parameter.\n :param name: str\n :param pwd: str\n '
print('This is a long long process of creating a user...')
self._name = name
self._pwd = pwd | Constructor with parameter.
:param name: str
:param pwd: str | Testing/unit_test/pytest_for_python/src/codes.py | __init__ | Ziang-Lu/Software-Development-and-Design | 1 | python | def __init__(self, name: str, pwd: str):
'\n Constructor with parameter.\n :param name: str\n :param pwd: str\n '
print('This is a long long process of creating a user...')
self._name = name
self._pwd = pwd | def __init__(self, name: str, pwd: str):
'\n Constructor with parameter.\n :param name: str\n :param pwd: str\n '
print('This is a long long process of creating a user...')
self._name = name
self._pwd = pwd<|docstring|>Constructor with parameter.
:param name: str
:param pwd: str<|endoftext|> |
93a8f435239bbc6efbc9490469dd07a93e70c1018bbbb4f2b96b23501f46769e | @property
def name(self) -> str:
'\n Accessor of name.\n :return: str\n '
return self._name | Accessor of name.
:return: str | Testing/unit_test/pytest_for_python/src/codes.py | name | Ziang-Lu/Software-Development-and-Design | 1 | python | @property
def name(self) -> str:
'\n Accessor of name.\n :return: str\n '
return self._name | @property
def name(self) -> str:
'\n Accessor of name.\n :return: str\n '
return self._name<|docstring|>Accessor of name.
:return: str<|endoftext|> |
4f734f2e592931c6352888b2e9397adf337c4725fd017ffe63b6ff993c196fd4 | @property
def pwd(self) -> str:
'\n Accessor of pwd.\n :return: str\n '
return self._pwd | Accessor of pwd.
:return: str | Testing/unit_test/pytest_for_python/src/codes.py | pwd | Ziang-Lu/Software-Development-and-Design | 1 | python | @property
def pwd(self) -> str:
'\n Accessor of pwd.\n :return: str\n '
return self._pwd | @property
def pwd(self) -> str:
'\n Accessor of pwd.\n :return: str\n '
return self._pwd<|docstring|>Accessor of pwd.
:return: str<|endoftext|> |
66e924610d66234cb99114f2d53f3288df7eed3d11a5e8abaa142e0a7d54100f | def clean_up(self) -> None:
'\n Dummy method to do some clean-up work.\n '
print('Doing some clean-up work...') | Dummy method to do some clean-up work. | Testing/unit_test/pytest_for_python/src/codes.py | clean_up | Ziang-Lu/Software-Development-and-Design | 1 | python | def clean_up(self) -> None:
'\n \n '
print('Doing some clean-up work...') | def clean_up(self) -> None:
'\n \n '
print('Doing some clean-up work...')<|docstring|>Dummy method to do some clean-up work.<|endoftext|> |
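Editor's example (not part of the dataset): minimal pytest cases exercising the membership predicates recorded above; the import path is inferred from the record's path column and is an assumption.

import pytest
from src.codes import User, is_member, is_prime_member  # assumed import path

def test_membership_rules():
    lily = User('Lily', 'pw')  # names starting with 'L' are members
    assert is_member(lily) and not is_prime_member(lily)

def test_none_user_raises():
    with pytest.raises(TypeError):
        is_member(None)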
13f855e78a628a7ebd87ded259359623560368fd84341a99a5288827bb565c92 | def normalize(type_str):
'\n TODO\n '
assert False | TODO | bimini/grammar.py | normalize | vaporydev/bimini | 7 | python | def normalize(type_str):
'\n \n '
assert False | def normalize(type_str):
'\n \n '
assert False<|docstring|>TODO<|endoftext|> |
03cef274a461e659efbad5a7e5fd7a0d26b82c79808ac7bdb8edf16bef6b58c8 | @functools.lru_cache(maxsize=None)
def parse(self, type_str):
'\n Parses a type string into an appropriate instance of\n :class:`~eth_abi.grammar.ABIType`. If a type string cannot be parsed,\n throws :class:`~eth_abi.exceptions.ParseError`.\n\n :param type_str: The type string to be parsed.\n :returns: An instance of :class:`~eth_abi.grammar.ABIType` containing\n information about the parsed type string.\n '
if (not isinstance(type_str, str)):
raise TypeError('Can only parse string values: got {}'.format(type(type_str)))
try:
return super().parse(type_str)
except parsimonious.ParseError as e:
raise ParseError(e.text, e.pos, e.expr) | Parses a type string into an appropriate instance of
:class:`~eth_abi.grammar.ABIType`. If a type string cannot be parsed,
throws :class:`~eth_abi.exceptions.ParseError`.
:param type_str: The type string to be parsed.
:returns: An instance of :class:`~eth_abi.grammar.ABIType` containing
information about the parsed type string. | bimini/grammar.py | parse | vaporydev/bimini | 7 | python | @functools.lru_cache(maxsize=None)
def parse(self, type_str):
'\n Parses a type string into an appropriate instance of\n :class:`~eth_abi.grammar.ABIType`. If a type string cannot be parsed,\n throws :class:`~eth_abi.exceptions.ParseError`.\n\n :param type_str: The type string to be parsed.\n :returns: An instance of :class:`~eth_abi.grammar.ABIType` containing\n information about the parsed type string.\n '
if (not isinstance(type_str, str)):
raise TypeError('Can only parse string values: got {}'.format(type(type_str)))
try:
return super().parse(type_str)
except parsimonious.ParseError as e:
raise ParseError(e.text, e.pos, e.expr) | @functools.lru_cache(maxsize=None)
def parse(self, type_str):
'\n Parses a type string into an appropriate instance of\n :class:`~eth_abi.grammar.ABIType`. If a type string cannot be parsed,\n throws :class:`~eth_abi.exceptions.ParseError`.\n\n :param type_str: The type string to be parsed.\n :returns: An instance of :class:`~eth_abi.grammar.ABIType` containing\n information about the parsed type string.\n '
if (not isinstance(type_str, str)):
raise TypeError('Can only parse string values: got {}'.format(type(type_str)))
try:
return super().parse(type_str)
except parsimonious.ParseError as e:
raise ParseError(e.text, e.pos, e.expr)<|docstring|>Parses a type string into an appropriate instance of
:class:`~eth_abi.grammar.ABIType`. If a type string cannot be parsed,
throws :class:`~eth_abi.exceptions.ParseError`.
:param type_str: The type string to be parsed.
:returns: An instance of :class:`~eth_abi.grammar.ABIType` containing
information about the parsed type string.<|endoftext|> |
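Editor's example (not part of the dataset): a self-contained stand-in showing the caching behaviour the parse record above relies on; functools.lru_cache keys on (self, type_str), so repeated parses of the same string return the same cached object.

import functools

class _ParserDemo:
    @functools.lru_cache(maxsize=None)
    def parse(self, type_str):
        if not isinstance(type_str, str):
            raise TypeError('Can only parse string values: got {}'.format(type(type_str)))
        return ('parsed', type_str)

p = _ParserDemo()
assert p.parse('uint256') is p.parse('uint256')  # second call is a cache hit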
a00bc9f24bde32eb14853b5170d7ee0a72d755859c9fe5d41eb2656595ebc5c1 | def to_int(self) -> int:
'Convert to an integer representation, used for serialization.'
mapping = {FrameType.MIN1: 1, FrameType.MIN5: 2, FrameType.MIN15: 3, FrameType.MIN30: 4, FrameType.MIN60: 5, FrameType.DAY: 6, FrameType.WEEK: 7, FrameType.MONTH: 8, FrameType.QUARTER: 9, FrameType.YEAR: 10}
return mapping[self] | Convert to an integer representation, used for serialization. | omicron/core/types.py | to_int | evimacs/omicron | 4 | python | def to_int(self) -> int:
mapping = {FrameType.MIN1: 1, FrameType.MIN5: 2, FrameType.MIN15: 3, FrameType.MIN30: 4, FrameType.MIN60: 5, FrameType.DAY: 6, FrameType.WEEK: 7, FrameType.MONTH: 8, FrameType.QUARTER: 9, FrameType.YEAR: 10}
return mapping[self] | def to_int(self) -> int:
mapping = {FrameType.MIN1: 1, FrameType.MIN5: 2, FrameType.MIN15: 3, FrameType.MIN30: 4, FrameType.MIN60: 5, FrameType.DAY: 6, FrameType.WEEK: 7, FrameType.MONTH: 8, FrameType.QUARTER: 9, FrameType.YEAR: 10}
return mapping[self]<|docstring|>Convert to an integer representation, used for serialization.<|endoftext|>
722d64235f6bf59489110d9def0db7c85adfc652a0fcb3f2c6cde2bc6a2baab5 | @staticmethod
def from_int(frame_type: int) -> 'FrameType':
'Convert an integer-encoded `frame_type` to a `FrameType`.'
mapping = {1: FrameType.MIN1, 2: FrameType.MIN5, 3: FrameType.MIN15, 4: FrameType.MIN30, 5: FrameType.MIN60, 6: FrameType.DAY, 7: FrameType.WEEK, 8: FrameType.MONTH, 9: FrameType.QUARTER, 10: FrameType.YEAR}
return mapping[frame_type] | Convert an integer-encoded `frame_type` to a `FrameType`. | omicron/core/types.py | from_int | evimacs/omicron | 4 | python | @staticmethod
def from_int(frame_type: int) -> 'FrameType':
mapping = {1: FrameType.MIN1, 2: FrameType.MIN5, 3: FrameType.MIN15, 4: FrameType.MIN30, 5: FrameType.MIN60, 6: FrameType.DAY, 7: FrameType.WEEK, 8: FrameType.MONTH, 9: FrameType.QUARTER, 10: FrameType.YEAR}
return mapping[frame_type] | @staticmethod
def from_int(frame_type: int) -> 'FrameType':
mapping = {1: FrameType.MIN1, 2: FrameType.MIN5, 3: FrameType.MIN15, 4: FrameType.MIN30, 5: FrameType.MIN60, 6: FrameType.DAY, 7: FrameType.WEEK, 8: FrameType.MONTH, 9: FrameType.QUARTER, 10: FrameType.YEAR}
return mapping[frame_type]<|docstring|>Convert an integer-encoded `frame_type` to a `FrameType`.<|endoftext|>
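Editor's example (not part of the dataset): the two mappings recorded above are inverses, so the integer encoding round-trips; assumes omicron is installed and FrameType is importable as in the record's path.

from omicron.core.types import FrameType  # assumed import path

assert FrameType.DAY.to_int() == 6           # per the mapping in the record
assert FrameType.from_int(6) is FrameType.DAY  # round-trip back to the enum member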
c6ba853096c7793898dec68e1cd0e6927895e9f9778b89be11990d813ee1493c | def zero_to_empty(raw_input):
'Return None when entry is 0'
if (utils.is_empty(raw_input) or (raw_input == '0')):
return None
else:
return raw_input | Return None when entry is 0 | django/common/scripts/cleaning/zero_to_empty.py | zero_to_empty | arkhn/fhir-river | 42 | python | def zero_to_empty(raw_input):
if (utils.is_empty(raw_input) or (raw_input == '0')):
return None
else:
return raw_input | def zero_to_empty(raw_input):
if (utils.is_empty(raw_input) or (raw_input == '0')):
return None
else:
return raw_input<|docstring|>Return None when entry is 0<|endoftext|> |
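Editor's example (not part of the dataset): the behaviour of the cleaning rule above, assuming utils.is_empty is falsy for a non-empty string; the import path is inferred from the record and is an assumption.

from common.scripts.cleaning.zero_to_empty import zero_to_empty  # assumed import path

assert zero_to_empty('0') is None   # the literal string '0' collapses to None
assert zero_to_empty('42') == '42'  # anything else passes through unchanged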
6f587b3b59a5be6846af2249fe4facf4a4f5b826f66403e6aa9b35b78460ecf8 | @abc.abstractmethod
def getType(self):
'Get the award type.'
pass | Get the award type. | plane_1.0/plane/award.py | getType | misaka46/Aircraft-war | 0 | python | @abc.abstractmethod
def getType(self):
pass | @abc.abstractmethod
def getType(self):
pass<|docstring|>Get the award type.<|endoftext|>
86ae589f2514aacddb9a862732e13ef252424e521af317794ebef9a5bca50dcc | def getTime():
'Get time in H:M:S format.'
_bigTime = time.strftime('%H:%M:%S')
return _bigTime | Get time in H:M:S format. | runbot.py | getTime | SuperShadowPlay/MCPD | 0 | python | def getTime():
_bigTime = time.strftime('%H:%M:%S')
return _bigTime | def getTime():
_bigTime = time.strftime('%H:%M:%S')
return _bigTime<|docstring|>Get time in H:M:S format.<|endoftext|> |
47f0f3d2468b540ae526019b302c86a65f469077500d9bbc5284f6f111ac61d4 | async def playerCountUpdate():
'Bot status for player count in the sidebar and output.\n\n The top part of this function is for the sidebar player count,\n the bottom part is for the output channel (if requested).\n '
(await client.wait_until_ready())
while (not client.is_closed()):
mcServer = MinecraftServer(cIP, cPort)
serverStatus = mcServer.status()
sidebarCount = discord.Game('{0} Players Online'.format(serverStatus.players.online))
(await client.change_presence(status=discord.Status.online, activity=sidebarCount))
if (cEnableNames is True):
mcQuery = mcServer.query()
try:
lastSetOfPlayers
except NameError:
lastSetOfPlayers = 'Notch'
if ((cEnableOutput is True) and (cEnableNames is True)):
if (cDynamicOutput is True):
diffOfPlayers = (mcQuery.players.names != lastSetOfPlayers)
if (diffOfPlayers is True):
lastSetOfPlayers = mcQuery.players.names
if (serverStatus.players.online != 0):
playerNames = ', '.join(mcQuery.players.names)
outputMessage = '\n{0} | {1} Players Online |\n{2}'.format(getTime(), serverStatus.players.online, playerNames)
else:
outputMessage = '{0} | No players online'.format(getTime())
if (diffOfPlayers is True):
(await cOutputChannel.send(outputMessage))
elif (cDynamicOutput is False):
if (serverStatus.players.online != 0):
playerNames = ', '.join(mcQuery.players.names)
outputMessage = '\n{0} | {1} Players Online |\n{2}'.format(getTime(), serverStatus.players.online, playerNames)
else:
outputMessage = '{0} | No players online'.format(getTime())
(await cOutputChannel.send(outputMessage))
(await asyncio.sleep(int(cRefresh))) | Bot status for player count in the sidebar and output.
The top part of this function is for the sidebar player count,
the bottom part is for the output channel (if requested). | runbot.py | playerCountUpdate | SuperShadowPlay/MCPD | 0 | python | async def playerCountUpdate():
'Bot status for player count in the sidebar and output.\n\n The top part of this function is for the sidebar player count,\n the bottom part is for the output channel (if requested).\n '
(await client.wait_until_ready())
while (not client.is_closed()):
mcServer = MinecraftServer(cIP, cPort)
serverStatus = mcServer.status()
sidebarCount = discord.Game('{0} Players Online'.format(serverStatus.players.online))
(await client.change_presence(status=discord.Status.online, activity=sidebarCount))
if (cEnableNames is True):
mcQuery = mcServer.query()
try:
lastSetOfPlayers
except NameError:
lastSetOfPlayers = 'Notch'
if ((cEnableOutput is True) and (cEnableNames is True)):
if (cDynamicOutput is True):
diffOfPlayers = (mcQuery.players.names != lastSetOfPlayers)
if (diffOfPlayers is True):
lastSetOfPlayers = mcQuery.players.names
if (serverStatus.players.online != 0):
playerNames = ', '.join(mcQuery.players.names)
outputMessage = '\n{0} | {1} Players Online |\n{2}'.format(getTime(), serverStatus.players.online, playerNames)
else:
outputMessage = '{0} | No players online'.format(getTime())
if (diffOfPlayers is True):
(await cOutputChannel.send(outputMessage))
elif (cDynamicOutput is False):
if (serverStatus.players.online != 0):
playerNames = ', '.join(mcQuery.players.names)
outputMessage = '\n{0} | {1} Players Online |\n{2}'.format(getTime(), serverStatus.players.online, playerNames)
else:
outputMessage = '{0} | No players online'.format(getTime())
(await cOutputChannel.send(outputMessage))
(await asyncio.sleep(int(cRefresh))) | async def playerCountUpdate():
'Bot status for player count in the sidebar and output.\n\n The top part of this function is for the sidebar player count,\n the bottom part is for the output channel (if requested).\n '
(await client.wait_until_ready())
while (not client.is_closed()):
mcServer = MinecraftServer(cIP, cPort)
serverStatus = mcServer.status()
sidebarCount = discord.Game('{0} Players Online'.format(serverStatus.players.online))
(await client.change_presence(status=discord.Status.online, activity=sidebarCount))
if (cEnableNames is True):
mcQuery = mcServer.query()
try:
lastSetOfPlayers
except NameError:
lastSetOfPlayers = 'Notch'
if ((cEnableOutput is True) and (cEnableNames is True)):
if (cDynamicOutput is True):
diffOfPlayers = (mcQuery.players.names != lastSetOfPlayers)
if (diffOfPlayers is True):
lastSetOfPlayers = mcQuery.players.names
if (serverStatus.players.online != 0):
playerNames = ', '.join(mcQuery.players.names)
outputMessage = '\n{0} | {1} Players Online |\n{2}'.format(getTime(), serverStatus.players.online, playerNames)
else:
outputMessage = '{0} | No players online'.format(getTime())
if (diffOfPlayers is True):
(await cOutputChannel.send(outputMessage))
elif (cDynamicOutput is False):
if (serverStatus.players.online != 0):
playerNames = ', '.join(mcQuery.players.names)
outputMessage = '\n{0} | {1} Players Online |\n{2}'.format(getTime(), serverStatus.players.online, playerNames)
else:
outputMessage = '{0} | No players online'.format(getTime())
(await cOutputChannel.send(outputMessage))
(await asyncio.sleep(int(cRefresh)))<|docstring|>Bot status for player count in the sidebar and output.
The top part of this function is for the sidebar player count,
the bottom part is for the output channel (if requested).<|endoftext|> |
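Editor's example (not part of the dataset): a hedged sketch of how a polling coroutine like playerCountUpdate is typically scheduled in discord.py 1.x; the scheduling call is an assumption about the rest of runbot.py, and BOT_TOKEN is a hypothetical variable.

client.loop.create_task(playerCountUpdate())  # start the background poller
client.run(BOT_TOKEN)                         # BOT_TOKEN: hypothetical token variable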
94d595ed8602675bd47c61113f7e43bf4b2c8b6f71491660dd40991e4c7936d2 | @client.event
async def on_message(message):
'On message portion, most of the actual programming is in this function.'
if (message.author == client.user):
return
msgSplit = message.content.split()
try:
msgSplit[0]
except IndexError:
return
if (cBasePrompt == '0'):
cPrompt = (('<@' + str(client.user.id)) + '>')
else:
cPrompt = cBasePrompt
if (msgSplit[0] == cPrompt):
if (msgSplit[1].lower() == 'help'):
(await message.channel.send('The commands available are:\n{0} Help - Displays this message\n{0} List - List the players online at {1}\n{0} Ping - Ping the bot\n{0} Source - Github Source Code'.format(cPrompt, cIP)))
if (msgSplit[1].lower() == 'ping'):
(await message.channel.send('Pong!'))
print(((("Pong'ed user " + str(message.author)) + ' :: ') + str(getTime())))
if (msgSplit[1].lower() == 'list'):
mcServer = MinecraftServer(cIP, cPort)
serverStatus = mcServer.status()
if (serverStatus.players.online == 0):
if (cSkipNoPlayers is False):
(await message.channel.send(cNoPlayers.format(cIP)))
elif ((cEnableNames is True) and ('{1}' in cMessageSend)):
if (serverStatus.players.online != 0):
onPlayers = serverStatus.players.online
mcQuery = mcServer.query()
(await message.channel.send(cMessageSend.format(onPlayers, ', '.join(mcQuery.players.names), cIP)))
else:
(await message.channel.send(cMessageSend.format(serverStatus.players.online)))
if (msgSplit[1].lower() == 'source'):
(await message.channel.send('MCPD v2.0, licensed under the MIT license.\nFull source code at:\nhttps://github.com/SuperShadowPlay/MCPD'))
print(((str(message.author) + ' Requested Source :: ') + getTime())) | On message portion, most of the actual programming is in this function. | runbot.py | on_message | SuperShadowPlay/MCPD | 0 | python | @client.event
async def on_message(message):
if (message.author == client.user):
return
msgSplit = message.content.split()
try:
msgSplit[0]
except IndexError:
return
if (cBasePrompt == '0'):
cPrompt = (('<@' + str(client.user.id)) + '>')
else:
cPrompt = cBasePrompt
if (msgSplit[0] == cPrompt):
if (msgSplit[1].lower() == 'help'):
(await message.channel.send('The commands available are:\n{0} Help - Displays this message\n{0} List - List the players online at {1}\n{0} Ping - Ping the bot\n{0} Source - Github Source Code'.format(cPrompt, cIP)))
if (msgSplit[1].lower() == 'ping'):
(await message.channel.send('Pong!'))
print(((("Pong'ed user " + str(message.author)) + ' :: ') + str(getTime())))
if (msgSplit[1].lower() == 'list'):
mcServer = MinecraftServer(cIP, cPort)
serverStatus = mcServer.status()
if (serverStatus.players.online == 0):
if (cSkipNoPlayers is False):
(await message.channel.send(cNoPlayers.format(cIP)))
elif ((cEnableNames is True) and ('{1}' in cMessageSend)):
if (serverStatus.players.online != 0):
onPlayers = serverStatus.players.online
mcQuery = mcServer.query()
(await message.channel.send(cMessageSend.format(onPlayers, ', '.join(mcQuery.players.names), cIP)))
else:
(await message.channel.send(cMessageSend.format(serverStatus.players.online)))
if (msgSplit[1].lower() == 'source'):
(await message.channel.send('MCPD v2.0, licensed under the MIT license.\nFull source code at:\nhttps://github.com/SuperShadowPlay/MCPD'))
print(((str(message.author) + ' Requested Source :: ') + getTime())) | @client.event
async def on_message(message):
if (message.author == client.user):
return
msgSplit = message.content.split()
try:
msgSplit[0]
except IndexError:
return
if (cBasePrompt == '0'):
cPrompt = (('<@' + str(client.user.id)) + '>')
else:
cPrompt = cBasePrompt
if (msgSplit[0] == cPrompt):
if (msgSplit[1].lower() == 'help'):
(await message.channel.send('The commands available are:\n{0} Help - Displays this message\n{0} List - List the players online at {1}\n{0} Ping - Ping the bot\n{0} Source - Github Source Code'.format(cPrompt, cIP)))
if (msgSplit[1].lower() == 'ping'):
(await message.channel.send('Pong!'))
print(((("Pong'ed user " + str(message.author)) + ' :: ') + str(getTime())))
if (msgSplit[1].lower() == 'list'):
mcServer = MinecraftServer(cIP, cPort)
serverStatus = mcServer.status()
if (serverStatus.players.online == 0):
if (cSkipNoPlayers is False):
(await message.channel.send(cNoPlayers.format(cIP)))
elif ((cEnableNames is True) and ('{1}' in cMessageSend)):
if (serverStatus.players.online != 0):
onPlayers = serverStatus.players.online
mcQuery = mcServer.query()
(await message.channel.send(cMessageSend.format(onPlayers, ', '.join(mcQuery.players.names), cIP)))
else:
(await message.channel.send(cMessageSend.format(serverStatus.players.online)))
if (msgSplit[1].lower() == 'source'):
(await message.channel.send('MCPD v2.0, licensed under the MIT license.\nFull source code at:\nhttps://github.com/SuperShadowPlay/MCPD'))
print(((str(message.author) + ' Requested Source :: ') + getTime()))<|docstring|>On message portion, most of the actual programming is in this function.<|endoftext|> |
c1b4bc6b92b1a798c9ccf7232660053180b9d0fba575d8c7a44d22fd4a8d62d3 | async def printStatus():
'Print the updating status to the console.'
(await client.wait_until_ready())
while (not client.is_closed()):
mcServer = MinecraftServer(cIP, cPort)
serverStatus = mcServer.status()
if ((cEnableNames is True) and ('{1}' in cMessageSend) and (serverStatus.players.online != 0)):
mcQuery = mcServer.query()
if (serverStatus.players.online == 1):
print('{0} Player :: {2} :: {1}'.format(serverStatus.players.online, ', '.join(mcQuery.players.names), getTime()))
else:
print('{0} Players :: {2} :: {1}'.format(serverStatus.players.online, ', '.join(mcQuery.players.names), getTime()))
elif (serverStatus.players.online == 1):
print('{0} Player :: {1}'.format(serverStatus.players.online, getTime()))
else:
print('{0} Players :: {1}'.format(serverStatus.players.online, getTime()))
(await asyncio.sleep(int(cRefresh))) | Print the updating status to the console. | runbot.py | printStatus | SuperShadowPlay/MCPD | 0 | python | async def printStatus():
(await client.wait_until_ready())
while (not client.is_closed()):
mcServer = MinecraftServer(cIP, cPort)
serverStatus = mcServer.status()
if ((cEnableNames is True) and ('{1}' in cMessageSend) and (serverStatus.players.online != 0)):
mcQuery = mcServer.query()
if (serverStatus.players.online == 1):
print('{0} Player :: {2} :: {1}'.format(serverStatus.players.online, ', '.join(mcQuery.players.names), getTime()))
else:
print('{0} Players :: {2} :: {1}'.format(serverStatus.players.online, ', '.join(mcQuery.players.names), getTime()))
elif (serverStatus.players.online == 1):
print('{0} Player :: {1}'.format(serverStatus.players.online, getTime()))
else:
print('{0} Players :: {1}'.format(serverStatus.players.online, getTime()))
(await asyncio.sleep(int(cRefresh))) | async def printStatus():
(await client.wait_until_ready())
while (not client.is_closed()):
mcServer = MinecraftServer(cIP, cPort)
serverStatus = mcServer.status()
if ((cEnableNames is True) and ('{1}' in cMessageSend) and (serverStatus.players.online != 0)):
mcQuery = mcServer.query()
if (serverStatus.players.online == 1):
print('{0} Player :: {2} :: {1}'.format(serverStatus.players.online, ', '.join(mcQuery.players.names), getTime()))
else:
print('{0} Players :: {2} :: {1}'.format(serverStatus.players.online, ', '.join(mcQuery.players.names), getTime()))
elif (serverStatus.players.online == 1):
print('{0} Player :: {1}'.format(serverStatus.players.online, getTime()))
else:
print('{0} Players :: {1}'.format(serverStatus.players.online, getTime()))
(await asyncio.sleep(int(cRefresh)))<|docstring|>Print the updating status to the console.<|endoftext|> |
32be21fc59851e04c2a7a379b8bae07b1e100a2f7bc963fbedc6e65837755517 | @client.event
async def on_ready():
'Log in and other such wonders.'
print('Logged in as:')
print(client.user.name)
print(client.user.id)
if (cBasePrompt == '0'):
cPromptText = ('@' + client.user.name)
else:
cPromptText = str(cBasePrompt)
print(('Prompt: ' + cPromptText))
print('------') | Log in and other such wonders. | runbot.py | on_ready | SuperShadowPlay/MCPD | 0 | python | @client.event
async def on_ready():
print('Logged in as:')
print(client.user.name)
print(client.user.id)
if (cBasePrompt == '0'):
cPromptText = ('@' + client.user.name)
else:
cPromptText = str(cBasePrompt)
print(('Prompt: ' + cPromptText))
print('------') | @client.event
async def on_ready():
print('Logged in as:')
print(client.user.name)
print(client.user.id)
if (cBasePrompt == '0'):
cPromptText = ('@' + client.user.name)
else:
cPromptText = str(cBasePrompt)
print(('Prompt: ' + cPromptText))
print('------')<|docstring|>Log in and other such wonders.<|endoftext|> |
6472b01902a6f33901061fa1f1b3fb33370791e0f9d2f68d6f9894995cefb7a3 | def predict_keras(img, alpha, rows):
'\n params: img: an input image with shape (1, 224, 224, 3)\n note: Image has been preprocessed (x = x / 127.5 - 1)\n Runs forward pass on network and returns logits and the inference time\n '
input_tensor = Input(shape=(rows, rows, 3))
model = MobileNetV2(input_tensor=input_tensor, include_top=True, weights='imagenet', alpha=alpha)
tic = time.time()
y_pred = model.predict(img.astype(np.float32))
y_pred = y_pred[0].ravel()
toc = time.time()
return (y_pred, (toc - tic)) | params: img: an input image with shape (1, 224, 224, 3)
note: Image has been preprocessed (x = x / 127.5 - 1)
Runs forward pass on network and returns logits and the inference time | test_mobilenet.py | predict_keras | JonathanCMitchell/mobilenet_v2_keras | 94 | python | def predict_keras(img, alpha, rows):
'\n params: img: an input image with shape (1, 224, 224, 3)\n note: Image has been preprocessed (x = x / 127.5 - 1)\n Runs forward pass on network and returns logits and the inference time\n '
input_tensor = Input(shape=(rows, rows, 3))
model = MobileNetV2(input_tensor=input_tensor, include_top=True, weights='imagenet', alpha=alpha)
tic = time.time()
y_pred = model.predict(img.astype(np.float32))
y_pred = y_pred[0].ravel()
toc = time.time()
return (y_pred, (toc - tic)) | def predict_keras(img, alpha, rows):
'\n params: img: an input image with shape (1, 224, 224, 3)\n note: Image has been preprocessed (x = x / 127.5 - 1)\n Runs forward pass on network and returns logits and the inference time\n '
input_tensor = Input(shape=(rows, rows, 3))
model = MobileNetV2(input_tensor=input_tensor, include_top=True, weights='imagenet', alpha=alpha)
tic = time.time()
y_pred = model.predict(img.astype(np.float32))
y_pred = y_pred[0].ravel()
toc = time.time()
return (y_pred, (toc - tic))<|docstring|>params: img: an input image with shape (1, 224, 224, 3)
note: Image has been preprocessed (x = x / 127.5 - 1)
Runs forward pass on network and returns logits and the inference time<|endoftext|> |
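Editor's example (not part of the dataset): the MobileNet-style preprocessing the docstring above refers to, scaling pixel values into [-1, 1] before calling predict_keras; the random image is illustrative.

import numpy as np

img = np.random.randint(0, 256, size=(1, 224, 224, 3)).astype(np.float32)
img = img / 127.5 - 1.0  # now in [-1, 1], as the docstring assumes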
9e1850770b4fe4aaf6bc5613fac9a1e77264254f0c5a08e3b79054354ab753cf | def predict_slim(img, checkpoint, rows):
'\n params: img: a preprocessed image with shape (1, 224, 224, 3)\n checkpoint: the path to the frozen.pb checkpoint\n Runs a forward pass of the tensorflow slim mobilenetV2 model which has been frozen for inference\n returns: numpy array x, which are the logits, and the inference time\n '
gd = tf.GraphDef.FromString(open((checkpoint + '_frozen.pb'), 'rb').read())
(inp, predictions) = tf.import_graph_def(gd, return_elements=['input:0', 'MobilenetV2/Predictions/Reshape_1:0'])
with tf.Session(graph=inp.graph):
tic = time.time()
y_pred = predictions.eval(feed_dict={inp: img})
y_pred = y_pred[0].ravel()
y_pred = (y_pred[1:] / y_pred[1:].sum())
toc = time.time()
return (y_pred, (toc - tic)) | params: img: a preprocessed image with shape (1, 224, 224, 3)
checkpoint: the path to the frozen.pb checkpoint
Runs a forward pass of the tensorflow slim mobilenetV2 model which has been frozen for inference
returns: numpy array x, which are the logits, and the inference time | test_mobilenet.py | predict_slim | JonathanCMitchell/mobilenet_v2_keras | 94 | python | def predict_slim(img, checkpoint, rows):
'\n params: img: a preprocessed image with shape (1, 224, 224, 3)\n checkpoint: the path to the frozen.pb checkpoint\n Runs a forward pass of the tensorflow slim mobilenetV2 model which has been frozen for inference\n returns: numpy array x, which are the logits, and the inference time\n '
gd = tf.GraphDef.FromString(open((checkpoint + '_frozen.pb'), 'rb').read())
(inp, predictions) = tf.import_graph_def(gd, return_elements=['input:0', 'MobilenetV2/Predictions/Reshape_1:0'])
with tf.Session(graph=inp.graph):
tic = time.time()
y_pred = predictions.eval(feed_dict={inp: img})
y_pred = y_pred[0].ravel()
y_pred = (y_pred[1:] / y_pred[1:].sum())
toc = time.time()
return (y_pred, (toc - tic)) | def predict_slim(img, checkpoint, rows):
'\n params: img: a preprocessed image with shape (1, 224, 224, 3)\n checkpoint: the path to the frozen.pb checkpoint\n Runs a forward pass of the tensorflow slim mobilenetV2 model which has been frozen for inference\n returns: numpy array x, which are the logits, and the inference time\n '
gd = tf.GraphDef.FromString(open((checkpoint + '_frozen.pb'), 'rb').read())
(inp, predictions) = tf.import_graph_def(gd, return_elements=['input:0', 'MobilenetV2/Predictions/Reshape_1:0'])
with tf.Session(graph=inp.graph):
tic = time.time()
y_pred = predictions.eval(feed_dict={inp: img})
y_pred = y_pred[0].ravel()
y_pred = (y_pred[1:] / y_pred[1:].sum())
toc = time.time()
return (y_pred, (toc - tic))<|docstring|>params: img: a preprocessed image with shape (1, 224, 224, 3)
checkpoint: the path to the frozen.pb checkpoint
Runs a forward pass of the tensorflow slim mobilenetV2 model which has been frozen for inference
returns: numpy array x, which are the logits, and the inference time<|endoftext|> |
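Editor's example (not part of the dataset): comparing the two backends recorded above on one image; both return 1000-class outputs (predict_slim drops the slim background class), so top-1 agreement can be checked directly. Assumes `img` is preprocessed as in the sketch above, and the checkpoint path is a placeholder.

y_keras, t_keras = predict_keras(img, alpha=1.0, rows=224)
y_slim, t_slim = predict_slim(img, 'mobilenet_v2_1.0_224', rows=224)
print('top-1 match:', y_keras.argmax() == y_slim.argmax(), t_keras, t_slim)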
d9193621a5c0b80ca43c8457e63fda0d7b4876e56595907c050cbe462a32d4ed | def add(self, source, destination, port):
'\n Adds a route from "source" to "destination".\n '
return self.paths.add(source, destination, port) | Adds a route from "source" to "destination". | cloudless/providers/aws_mock/paths.py | add | getcloudless/cloudless | 8 | python | def add(self, source, destination, port):
'\n \n '
return self.paths.add(source, destination, port) | def add(self, source, destination, port):
'\n \n '
return self.paths.add(source, destination, port)<|docstring|>Adds a route from "source" to "destination".<|endoftext|> |
008bdb9384b033d1a79e7a94a7ad56c3725458c4d3d3282a883cde727fe71df2 | def remove(self, source, destination, port):
'\n Remove a route from "source" to "destination".\n '
return self.paths.remove(source, destination, port) | Remove a route from "source" to "destination". | cloudless/providers/aws_mock/paths.py | remove | getcloudless/cloudless | 8 | python | def remove(self, source, destination, port):
'\n \n '
return self.paths.remove(source, destination, port) | def remove(self, source, destination, port):
'\n \n '
return self.paths.remove(source, destination, port)<|docstring|>Remove a route from "source" to "destination".<|endoftext|> |
64879b32cfc4cca7a06e52ba9d65da69c88af68b4de1aea4a9178dadaa4088ea | def list(self):
'\n List all paths and return a dictionary structure representing a graph.\n '
return self.paths.list() | List all paths and return a dictionary structure representing a graph. | cloudless/providers/aws_mock/paths.py | list | getcloudless/cloudless | 8 | python | def list(self):
'\n \n '
return self.paths.list() | def list(self):
'\n \n '
return self.paths.list()<|docstring|>List all paths and return a dictionary structure representing a graph.<|endoftext|> |
8b8f4c61383f9b5a4ef0e1cb10055c59dda2afb60b05ef1f6a7493cb2f6c7bf7 | def internet_accessible(self, service, port):
'\n Return true if the given service is accessible on the internet.\n '
return self.paths.internet_accessible(service, port) | Return true if the given service is accessible on the internet. | cloudless/providers/aws_mock/paths.py | internet_accessible | getcloudless/cloudless | 8 | python | def internet_accessible(self, service, port):
'\n \n '
return self.paths.internet_accessible(service, port) | def internet_accessible(self, service, port):
'\n \n '
return self.paths.internet_accessible(service, port)<|docstring|>Return true if the given service is accessible on the internet.<|endoftext|> |
493943e53ad53265737004aceb9a3a8df8aa1e325b646686725c53d0200bd6db | def has_access(self, source, destination, port):
'\n Return true if there is a route from "source" to "destination".\n '
return self.paths.has_access(source, destination, port) | Return true if there is a route from "source" to "destination". | cloudless/providers/aws_mock/paths.py | has_access | getcloudless/cloudless | 8 | python | def has_access(self, source, destination, port):
'\n \n '
return self.paths.has_access(source, destination, port) | def has_access(self, source, destination, port):
'\n \n '
return self.paths.has_access(source, destination, port)<|docstring|>Return true if there is a route from "source" to "destination".<|endoftext|> |
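Editor's example (not part of the dataset): exercising the mock paths facade recorded above; `paths`, `web` and `db` are hypothetical handles assumed to come from the same mock client.

paths.add(web, db, 5432)                 # open web -> db on port 5432
assert paths.has_access(web, db, 5432)
paths.remove(web, db, 5432)
assert not paths.has_access(web, db, 5432)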
d7f9a1827bd39a0e9abe4e221e617d39b97b9f964094ab775cde86bd2a1434dc | def get_full_name(self):
"\n Returns the user's fullname\n "
return self.fullname | Returns the user's fullname | users/models.py | get_full_name | rnovec/petgram-api | 1 | python | def get_full_name(self):
"\n \n "
return self.fullname | def get_full_name(self):
"\n \n "
return self.fullname<|docstring|>Returns the user's fullname<|endoftext|> |
a6fb138b7b5a4c52b11e88ba00e59200e53541fc634bbedf670926d5f24b25db | def __init__(self, lrkey=None, **kwargs):
'\n define local vars and send all other (keyworded) arguments to parent.\n '
if (lrkey is None):
print('{}: using default value of lapse rate ({})'.format(self.__class__.__name__, self.lrkey))
else:
self.lrkey = lrkey
super().__init__(**kwargs) | define local vars and send all other (keyworded) arguments to parent. | solver/rce.py | __init__ | msmithsm/rce | 0 | python | def __init__(self, lrkey=None, **kwargs):
'\n \n '
if (lrkey is None):
print('{}: using default value of lapse rate ({})'.format(self.__class__.__name__, self.lrkey))
else:
self.lrkey = lrkey
super().__init__(**kwargs) | def __init__(self, lrkey=None, **kwargs):
'\n \n '
if (lrkey is None):
print('{}: using default value of lapse rate ({})'.format(self.__class__.__name__, self.lrkey))
else:
self.lrkey = lrkey
super().__init__(**kwargs)<|docstring|>define local vars and send all other (keyworded) arguments to parent.<|endoftext|> |
d51eae68e2bd30a6637ab3965444c8d4e33d59e937d1f4da33def7ae4587b057 | def _convectiveadjustment(self, atms, flx, hr):
'\n Apply convective adjustment\n '
tconv = self._lr(atms, self.lrkey)
trad = atms.t.copy()
atms.t = np.where((atms.p >= atms._ttl_pmax), tconv, np.maximum(tconv, atms.t))
try:
iconv_top = (np.argwhere((tconv >= trad)).min() - 1)
except ValueError:
iconv_top = (len(atms) - 1)
finally:
atms.iconv = iconv_top
return (atms, flx, hr) | Apply convective adjustment | solver/rce.py | _convectiveadjustment | msmithsm/rce | 0 | python | def _convectiveadjustment(self, atms, flx, hr):
'\n \n '
tconv = self._lr(atms, self.lrkey)
trad = atms.t.copy()
atms.t = np.where((atms.p >= atms._ttl_pmax), tconv, np.maximum(tconv, atms.t))
try:
iconv_top = (np.argwhere((tconv >= trad)).min() - 1)
except ValueError:
iconv_top = (len(atms) - 1)
finally:
atms.iconv = iconv_top
return (atms, flx, hr) | def _convectiveadjustment(self, atms, flx, hr):
'\n \n '
tconv = self._lr(atms, self.lrkey)
trad = atms.t.copy()
atms.t = np.where((atms.p >= atms._ttl_pmax), tconv, np.maximum(tconv, atms.t))
try:
iconv_top = (np.argwhere((tconv >= trad)).min() - 1)
except ValueError:
iconv_top = (len(atms) - 1)
finally:
atms.iconv = iconv_top
return (atms, flx, hr)<|docstring|>Apply convective adjustment<|endoftext|> |
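The heart of _convectiveadjustment is a single np.where: at pressures above the tropopause cap (_ttl_pmax) the convective profile is imposed unconditionally, and elsewhere the warmer of the radiative and convective temperatures wins. A standalone numeric sketch, assuming levels are ordered top-of-atmosphere first (which the iconv_top search suggests) and using made-up profile values:

import numpy as np

p = np.array([200.0, 400.0, 600.0, 800.0, 1000.0])      # hPa, top of atmosphere first
t_rad = np.array([240.0, 255.0, 270.0, 285.0, 300.0])   # radiative-equilibrium profile
t_conv = np.array([210.0, 245.0, 268.0, 287.0, 300.0])  # lapse-rate (convective) profile
ttl_pmax = 500.0                                         # stands in for atms._ttl_pmax

t_adj = np.where(p >= ttl_pmax, t_conv, np.maximum(t_conv, t_rad))
print(t_adj)  # [240. 255. 268. 287. 300.]

# Convective top: one level above the first (highest) level where the
# convective profile already reaches the radiative one.
iconv_top = np.argwhere(t_conv >= t_rad).min() - 1
print(iconv_top)  # 2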
b6f78b3d6732fa5776f148a2b5ae026acc0a8eccadab1c736b24bc26dcc43cba | def get_data_as_dataframe(self, start_date: datetime, end_date: datetime, method: str, aggregate: str, query: str):
'\n Returns data corresponding to the given query and from the specified time period.\n '
start_date_str = start_date.strftime('%Y.%m.%d %H:00:00')
end_date_str = end_date.strftime('%Y.%m.%d %H:00:00')
period_params = f"(IP_FROM_TIME='{start_date_str}',IP_TO_TIME='{end_date_str}',AGGREGATION_LEVEL='{aggregate}')"
url = (f'{self.sap_service_url}/{self.service}.xsodata/{method}{period_params}/' + f'Execute?$format=json&$select={query}')
headers = {'Authorization': f'Basic {self.base64_auth_api_key}'}
response = requests.get(url, headers=headers)
if response.ok:
data = response.json()
dataframe = pd.DataFrame(data['d']['results'])
if ('__metadata' in dataframe.columns):
dataframe.drop('__metadata', axis=1, inplace=True)
return dataframe
else:
return None | Returns data corresponding to the given query and from the specified time period. | src/osiris/adapters/sap_service.py | get_data_as_dataframe | Open-Dataplatform/osiris-sdk | 1 | python | def get_data_as_dataframe(self, start_date: datetime, end_date: datetime, method: str, aggregate: str, query: str):
'\n \n '
start_date_str = start_date.strftime('%Y.%m.%d %H:00:00')
end_date_str = end_date.strftime('%Y.%m.%d %H:00:00')
period_params = f"(IP_FROM_TIME='{start_date_str}',IP_TO_TIME='{end_date_str}',AGGREGATION_LEVEL='{aggregate}')"
url = (f'{self.sap_service_url}/{self.service}.xsodata/{method}{period_params}/' + f'Execute?$format=json&$select={query}')
headers = {'Authorization': f'Basic {self.base64_auth_api_key}'}
response = requests.get(url, headers=headers)
if response.ok:
data = response.json()
dataframe = pd.DataFrame(data['d']['results'])
if ('__metadata' in dataframe.columns):
dataframe.drop('__metadata', axis=1, inplace=True)
return dataframe
else:
return None | def get_data_as_dataframe(self, start_date: datetime, end_date: datetime, method: str, aggregate: str, query: str):
'\n \n '
start_date_str = start_date.strftime('%Y.%m.%d %H:00:00')
end_date_str = end_date.strftime('%Y.%m.%d %H:00:00')
period_params = f"(IP_FROM_TIME='{start_date_str}',IP_TO_TIME='{end_date_str}',AGGREGATION_LEVEL='{aggregate}')"
url = (f'{self.sap_service_url}/{self.service}.xsodata/{method}{period_params}/' + f'Execute?$format=json&$select={query}')
headers = {'Authorization': f'Basic {self.base64_auth_api_key}'}
response = requests.get(url, headers=headers)
if response.ok:
data = response.json()
dataframe = pd.DataFrame(data['d']['results'])
if ('__metadata' in dataframe.columns):
dataframe.drop('__metadata', axis=1, inplace=True)
return dataframe
else:
return None<|docstring|>Returns data corresponding to the given query and from the specified time period.<|endoftext|> |
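Because everything the method sends is derivable from its arguments, the request URL can be replayed standalone to see what the OData call looks like; the host, service, method, and query values below are placeholders, not real endpoints:

from datetime import datetime

sap_service_url = 'https://sap.example.com'   # placeholder host
service = 'EnergyReadings'                    # placeholder xsodata service
method, aggregate, query = 'GetReadings', 'HOUR', 'Timestamp,Value'

start_date_str = datetime(2021, 1, 1).strftime('%Y.%m.%d %H:00:00')
end_date_str = datetime(2021, 1, 2).strftime('%Y.%m.%d %H:00:00')
period_params = (f"(IP_FROM_TIME='{start_date_str}',IP_TO_TIME='{end_date_str}',"
                 f"AGGREGATION_LEVEL='{aggregate}')")
url = (f'{sap_service_url}/{service}.xsodata/{method}{period_params}/'
       f'Execute?$format=json&$select={query}')
print(url)
# .../EnergyReadings.xsodata/GetReadings(IP_FROM_TIME='2021.01.01 00:00:00',
# IP_TO_TIME='2021.01.02 00:00:00',AGGREGATION_LEVEL='HOUR')/Execute?$format=json&$select=Timestamp,Value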
84c3cd85d5c6f616b9eacd63580b492236f880efa554645108e8bc7910b3e8b7 | def Equals(self, *__args):
'\n Equals(self: HierarchicalVirtualizationConstraints,comparisonConstraints: HierarchicalVirtualizationConstraints) -> bool\n\n Equals(self: HierarchicalVirtualizationConstraints,oCompare: object) -> bool\n '
pass | Equals(self: HierarchicalVirtualizationConstraints,comparisonConstraints: HierarchicalVirtualizationConstraints) -> bool
Equals(self: HierarchicalVirtualizationConstraints,oCompare: object) -> bool | release/stubs.min/System/Windows/Controls/__init___parts/HierarchicalVirtualizationConstraints.py | Equals | htlcnn/ironpython-stubs | 182 | python | def Equals(self, *__args):
'\n Equals(self: HierarchicalVirtualizationConstraints,comparisonConstraints: HierarchicalVirtualizationConstraints) -> bool\n\n Equals(self: HierarchicalVirtualizationConstraints,oCompare: object) -> bool\n '
pass | def Equals(self, *__args):
'\n Equals(self: HierarchicalVirtualizationConstraints,comparisonConstraints: HierarchicalVirtualizationConstraints) -> bool\n\n Equals(self: HierarchicalVirtualizationConstraints,oCompare: object) -> bool\n '
pass<|docstring|>Equals(self: HierarchicalVirtualizationConstraints,comparisonConstraints: HierarchicalVirtualizationConstraints) -> bool
Equals(self: HierarchicalVirtualizationConstraints,oCompare: object) -> bool<|endoftext|> |
a85998ec79c86fad76ba28b3f608ac7b259a02812801341eee32682bb060823a | def GetHashCode(self):
' GetHashCode(self: HierarchicalVirtualizationConstraints) -> int '
pass | GetHashCode(self: HierarchicalVirtualizationConstraints) -> int | release/stubs.min/System/Windows/Controls/__init___parts/HierarchicalVirtualizationConstraints.py | GetHashCode | htlcnn/ironpython-stubs | 182 | python | def GetHashCode(self):
' '
pass | def GetHashCode(self):
' '
pass<|docstring|>GetHashCode(self: HierarchicalVirtualizationConstraints) -> int<|endoftext|> |
afbec9d4036cd220dba8b92f655293f0f39188798750e21688134b4cd99b3518 | def __eq__(self, *args):
' x.__eq__(y) <==> x==y '
pass | x.__eq__(y) <==> x==y | release/stubs.min/System/Windows/Controls/__init___parts/HierarchicalVirtualizationConstraints.py | __eq__ | htlcnn/ironpython-stubs | 182 | python | def __eq__(self, *args):
' '
pass | def __eq__(self, *args):
' '
pass<|docstring|>x.__eq__(y) <==> x==y<|endoftext|> |
dcccb41d53b52c0ad8d56c0d3897f5fbdcc293dc83dc2c2e3e0daf8bb9724e0c | @staticmethod
def __new__(self, cacheLength, cacheLengthUnit, viewport):
'\n __new__(cls: type,cacheLength: VirtualizationCacheLength,cacheLengthUnit: VirtualizationCacheLengthUnit,viewport: Rect)\n\n __new__[HierarchicalVirtualizationConstraints]() -> HierarchicalVirtualizationConstraints\n '
pass | __new__(cls: type,cacheLength: VirtualizationCacheLength,cacheLengthUnit: VirtualizationCacheLengthUnit,viewport: Rect)
__new__[HierarchicalVirtualizationConstraints]() -> HierarchicalVirtualizationConstraints | release/stubs.min/System/Windows/Controls/__init___parts/HierarchicalVirtualizationConstraints.py | __new__ | htlcnn/ironpython-stubs | 182 | python | @staticmethod
def __new__(self, cacheLength, cacheLengthUnit, viewport):
'\n __new__(cls: type,cacheLength: VirtualizationCacheLength,cacheLengthUnit: VirtualizationCacheLengthUnit,viewport: Rect)\n\n __new__[HierarchicalVirtualizationConstraints]() -> HierarchicalVirtualizationConstraints\n '
pass | @staticmethod
def __new__(self, cacheLength, cacheLengthUnit, viewport):
'\n __new__(cls: type,cacheLength: VirtualizationCacheLength,cacheLengthUnit: VirtualizationCacheLengthUnit,viewport: Rect)\n\n __new__[HierarchicalVirtualizationConstraints]() -> HierarchicalVirtualizationConstraints\n '
pass<|docstring|>__new__(cls: type,cacheLength: VirtualizationCacheLength,cacheLengthUnit: VirtualizationCacheLengthUnit,viewport: Rect)
__new__[HierarchicalVirtualizationConstraints]() -> HierarchicalVirtualizationConstraints<|endoftext|> |
450ea4cbd9edeb66ffd2ced779ad7470dbe2f0845b1fdb03f1b912a6c980178f | def normalize_answer(s):
'Lower text and remove punctuation, articles and extra whitespace.'
def remove_articles(text):
return re.sub('\\b(a|an|the)\\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join((ch for ch in text if (ch not in exclude)))
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s)))) | Lower text and remove punctuation, articles and extra whitespace. | evals/eval_xor_engspan.py | normalize_answer | gowtham1997/XORQA | 62 | python | def normalize_answer(s):
def remove_articles(text):
return re.sub('\\b(a|an|the)\\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
        return ''.join((ch for ch in text if (ch not in exclude)))
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s)))) | def normalize_answer(s):
def remove_articles(text):
return re.sub('\\b(a|an|the)\\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
        return ''.join((ch for ch in text if (ch not in exclude)))
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))<|docstring|>Lower text and remove punctuation, articles and extra whitespace.<|endoftext|> |
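For concreteness, the pipeline above applies its four steps in order (lowercase, strip punctuation, strip articles, collapse whitespace), so with normalize_answer defined as above:

print(normalize_answer('The  Quick, Brown Fox!'))  # -> quick brown fox
print(normalize_answer('An apple a day'))          # -> apple day

Note that punctuation is removed before articles, so "the," still counts as an article once its comma is gone.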
c5db299caeb94ecdf9e51d70e2c050385506eb16e38d7f990991ab1aa5ef0723 | @pytest.fixture
def klass():
'Provide the CUT.'
from agile_analytics import LeadTimeDistributionReporter
return LeadTimeDistributionReporter | Provide the CUT. | tests/test_lead_reporter.py | klass | cmheisel/jira-agile-extractor | 14 | python | @pytest.fixture
def klass():
from agile_analytics import LeadTimeDistributionReporter
return LeadTimeDistributionReporter | @pytest.fixture
def klass():
from agile_analytics import LeadTimeDistributionReporter
return LeadTimeDistributionReporter<|docstring|>Provide the CUT.<|endoftext|> |
9424a06aa4861f46f8a3d9c9c0957718fec7f30c1ab802246bd67c6ce8699bb7 | def test_klass(klass):
'Ensure the fixture works.'
assert klass | Ensure the fixture works. | tests/test_lead_reporter.py | test_klass | cmheisel/jira-agile-extractor | 14 | python | def test_klass(klass):
assert klass | def test_klass(klass):
assert klass<|docstring|>Ensure the fixture works.<|endoftext|> |
ac8f212265ad8c18d6f5ee989f4c2d7c4c665f19680e0010bf7f3f326679dfdf | def test_date_selection(klass, datetime, tzutc):
'Ensure the CUT picks Sunday-Saturday date range'
r = klass('Foo')
r.start_date = datetime(2016, 5, 21, 0, 0, 0)
r.end_date = datetime(2016, 6, 21, 11, 59, 59)
assert (r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc))
assert (r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc)) | Ensure the CUT picks Sunday-Saturday date range | tests/test_lead_reporter.py | test_date_selection | cmheisel/jira-agile-extractor | 14 | python | def test_date_selection(klass, datetime, tzutc):
r = klass('Foo')
r.start_date = datetime(2016, 5, 21, 0, 0, 0)
r.end_date = datetime(2016, 6, 21, 11, 59, 59)
assert (r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc))
assert (r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc)) | def test_date_selection(klass, datetime, tzutc):
r = klass('Foo')
r.start_date = datetime(2016, 5, 21, 0, 0, 0)
r.end_date = datetime(2016, 6, 21, 11, 59, 59)
assert (r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc))
assert (r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc))<|docstring|>Ensure the CUT picks Sunday-Saturday date range<|endoftext|> |
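The snapping this test pins down (start back to the previous Sunday, end forward to the next Saturday) can be written directly with weekday arithmetic. A behavior-equivalent sketch, not the library's implementation; timezone handling omitted:

from datetime import datetime, timedelta

def snap_to_week(start, end):
    # Python's weekday(): Monday=0 ... Sunday=6
    start = start - timedelta(days=(start.weekday() + 1) % 7)  # back to Sunday
    end = end + timedelta(days=(5 - end.weekday()) % 7)        # forward to Saturday
    return start, end

s, e = snap_to_week(datetime(2016, 5, 21), datetime(2016, 6, 21, 11, 59, 59))
assert s == datetime(2016, 5, 15)
assert e == datetime(2016, 6, 25, 11, 59, 59)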
84432e1478c427b17a603382771672aec43d9c7a0251227bd731d88b58d5f48c | def test_filter(klass, days_agos, AnalyzedAgileTicket, tzutc):
'filter_issues ignores issues completed before the specified range.'
issue_list_kwargs = []
for i in range(1, 3):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[2]), started=dict(state='Started', entered_at=days_agos[2]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs]
issue_out_of_range = AnalyzedAgileTicket(key='TEST-OOR', committed=dict(state='Committed', entered_at=days_agos[42]), started=dict(state='Started', entered_at=days_agos[44]), ended=dict(state='Ended', entered_at=days_agos[45]))
issue_list.append(issue_out_of_range)
r = klass(title='Cycle Time Distribution Past 30 days', start_date=days_agos[30], end_date=days_agos[0])
filtered_issues = r.filter_issues(issue_list)
assert (r.start_date > issue_out_of_range.ended['entered_at'])
assert (len(filtered_issues) == 2) | filter_issues ignores issues completed before the specified range. | tests/test_lead_reporter.py | test_filter | cmheisel/jira-agile-extractor | 14 | python | def test_filter(klass, days_agos, AnalyzedAgileTicket, tzutc):
issue_list_kwargs = []
for i in range(1, 3):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[2]), started=dict(state='Started', entered_at=days_agos[2]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs]
issue_out_of_range = AnalyzedAgileTicket(key='TEST-OOR', committed=dict(state='Committed', entered_at=days_agos[42]), started=dict(state='Started', entered_at=days_agos[44]), ended=dict(state='Ended', entered_at=days_agos[45]))
issue_list.append(issue_out_of_range)
r = klass(title='Cycle Time Distribution Past 30 days', start_date=days_agos[30], end_date=days_agos[0])
filtered_issues = r.filter_issues(issue_list)
assert (r.start_date > issue_out_of_range.ended['entered_at'])
assert (len(filtered_issues) == 2) | def test_filter(klass, days_agos, AnalyzedAgileTicket, tzutc):
issue_list_kwargs = []
for i in range(1, 3):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[2]), started=dict(state='Started', entered_at=days_agos[2]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs]
issue_out_of_range = AnalyzedAgileTicket(key='TEST-OOR', committed=dict(state='Committed', entered_at=days_agos[42]), started=dict(state='Started', entered_at=days_agos[44]), ended=dict(state='Ended', entered_at=days_agos[45]))
issue_list.append(issue_out_of_range)
r = klass(title='Cycle Time Distribution Past 30 days', start_date=days_agos[30], end_date=days_agos[0])
filtered_issues = r.filter_issues(issue_list)
assert (r.start_date > issue_out_of_range.ended['entered_at'])
assert (len(filtered_issues) == 2)<|docstring|>filter_issues ignores issues completed before the specified range.<|endoftext|> |
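The out-of-range assertion makes the filter's contract explicit: a ticket belongs to the report exactly when its completion timestamp falls inside the window. An equivalent-behavior sketch (attribute shapes taken from the fixtures above):

def filter_issues_sketch(issues, start_date, end_date):
    # Keep tickets whose 'ended' timestamp lies within [start_date, end_date].
    return [i for i in issues
            if start_date <= i.ended['entered_at'] <= end_date]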
2ab3d91cc44ae60359b7fa30f6b3e093199dd709532aa9a3c446723d16c584f7 | def test_report_summary(klass, datetime, tzutc):
'report_on returns an object with meta data.'
start_date = datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc)
end_date = datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc)
r = klass(title='Cycle Time Distribution Past 30 days', start_date=start_date, end_date=end_date)
expected = dict(title='Cycle Time Distribution Past 30 days', start_date=start_date, end_date=end_date)
assert (r.report_on([]).summary == expected) | report_on returns an object with meta data. | tests/test_lead_reporter.py | test_report_summary | cmheisel/jira-agile-extractor | 14 | python | def test_report_summary(klass, datetime, tzutc):
start_date = datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc)
end_date = datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc)
r = klass(title='Cycle Time Distribution Past 30 days', start_date=start_date, end_date=end_date)
expected = dict(title='Cycle Time Distribution Past 30 days', start_date=start_date, end_date=end_date)
assert (r.report_on([]).summary == expected) | def test_report_summary(klass, datetime, tzutc):
start_date = datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc)
end_date = datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc)
r = klass(title='Cycle Time Distribution Past 30 days', start_date=start_date, end_date=end_date)
expected = dict(title='Cycle Time Distribution Past 30 days', start_date=start_date, end_date=end_date)
assert (r.report_on([]).summary == expected)<|docstring|>report_on returns an object with meta data.<|endoftext|> |
5a3c1bc1550630676dab900a1d726c7488f3e29a942f65df713faec614ad836f | def test_report_table_empty(klass, days_agos):
'Ensure an empty list of tickets is handled.'
expected = [['Lead Time', 'Tickets']]
r = klass(title='Cycle Time Distribution Past 30 days', start_date=days_agos[30], end_date=days_agos[0])
report = r.report_on([])
assert (report.table == expected) | Ensure an empty list of tickets is handled. | tests/test_lead_reporter.py | test_report_table_empty | cmheisel/jira-agile-extractor | 14 | python | def test_report_table_empty(klass, days_agos):
expected = [['Lead Time', 'Tickets']]
r = klass(title='Cycle Time Distribution Past 30 days', start_date=days_agos[30], end_date=days_agos[0])
report = r.report_on([])
assert (report.table == expected) | def test_report_table_empty(klass, days_agos):
expected = [['Lead Time', 'Tickets']]
r = klass(title='Cycle Time Distribution Past 30 days', start_date=days_agos[30], end_date=days_agos[0])
report = r.report_on([])
assert (report.table == expected)<|docstring|>Ensure an empty list of tickets is handled.<|endoftext|> |
c8e27c200a41bb2411e85ea843a9578d199247fb2ecc47575abe1d68a1042574 | def test_report_table(klass, days_agos, AnalyzedAgileTicket, tzutc):
    'report_on returns an object with a tabular representation of the data'
issue_list_kwargs = []
for i in range(1, 3):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[2]), started=dict(state='Started', entered_at=days_agos[2]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
for i in range(4, 10):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[5]), started=dict(state='Started', entered_at=days_agos[4]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
for i in range(11, 13):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[10]), started=dict(state='Started', entered_at=days_agos[9]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs]
expected = [['Lead Time', 'Tickets'], [1, 0], [2, 2], [3, 0], [4, 0], [5, 6], [6, 0], [7, 0], [8, 0], [9, 0], [10, 2]]
r = klass(title='Cycle Time Distribution Past 30 days', start_date=days_agos[30], end_date=days_agos[0])
report = r.report_on(issue_list)
    assert (report.table == expected) | report_on returns an object with a tabular representation of the data | tests/test_lead_reporter.py | test_report_table | cmheisel/jira-agile-extractor | 14 | python | def test_report_table(klass, days_agos, AnalyzedAgileTicket, tzutc):
issue_list_kwargs = []
for i in range(1, 3):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[2]), started=dict(state='Started', entered_at=days_agos[2]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
for i in range(4, 10):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[5]), started=dict(state='Started', entered_at=days_agos[4]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
for i in range(11, 13):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[10]), started=dict(state='Started', entered_at=days_agos[9]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs]
expected = [['Lead Time', 'Tickets'], [1, 0], [2, 2], [3, 0], [4, 0], [5, 6], [6, 0], [7, 0], [8, 0], [9, 0], [10, 2]]
r = klass(title='Cycle Time Distribution Past 30 days', start_date=days_agos[30], end_date=days_agos[0])
report = r.report_on(issue_list)
assert (report.table == expected) | def test_report_table(klass, days_agos, AnalyzedAgileTicket, tzutc):
issue_list_kwargs = []
for i in range(1, 3):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[2]), started=dict(state='Started', entered_at=days_agos[2]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
for i in range(4, 10):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[5]), started=dict(state='Started', entered_at=days_agos[4]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
for i in range(11, 13):
kwargs = dict(key='TEST-{}'.format(i), committed=dict(state='Committed', entered_at=days_agos[10]), started=dict(state='Started', entered_at=days_agos[9]), ended=dict(state='Ended', entered_at=days_agos[0]))
issue_list_kwargs.append(kwargs)
issue_list = [AnalyzedAgileTicket(**kwargs) for kwargs in issue_list_kwargs]
expected = [['Lead Time', 'Tickets'], [1, 0], [2, 2], [3, 0], [4, 0], [5, 6], [6, 0], [7, 0], [8, 0], [9, 0], [10, 2]]
r = klass(title='Cycle Time Distribution Past 30 days', start_date=days_agos[30], end_date=days_agos[0])
report = r.report_on(issue_list)
    assert (report.table == expected)<|docstring|>report_on returns an object with a tabular representation of the data<|endoftext|>
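The expected table fixes the reporter's contract: lead time is the whole-day gap from commitment to completion, and every day from 1 up to the observed maximum gets a row, zeros included. An equivalent computation (not the library's code):

from collections import Counter

lead_times = [2, 2, 5, 5, 5, 5, 5, 5, 10, 10]  # (ended - committed) days per ticket above
counts = Counter(lead_times)
table = [['Lead Time', 'Tickets']] + [[d, counts.get(d, 0)]
                                      for d in range(1, max(lead_times) + 1)]
assert table[2] == [2, 2] and table[5] == [5, 6] and table[10] == [10, 2]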
0c52d0236b1f8069f3dfb9e2c60220c27725dcf2d7ff9c3d270f0c3454b935e9 | def search(strings, chars):
"Given a sequence of strings and an iterator of chars, return True\n if any of the strings would be a prefix of ''.join(chars); but\n only consume chars up to the end of the match."
raise NotImplementedError | Given a sequence of strings and an iterator of chars, return True
if any of the strings would be a prefix of ''.join(chars); but
only consume chars up to the end of the match. | reference/regexercise/literals.py | search | JaDogg/__py_playground | 1 | python | def search(strings, chars):
"Given a sequence of strings and an iterator of chars, return True\n if any of the strings would be a prefix of .join(chars); but\n only consume chars up to the end of the match."
raise NotImplementedError | def search(strings, chars):
"Given a sequence of strings and an iterator of chars, return True\n if any of the strings would be a prefix of .join(chars); but\n only consume chars up to the end of the match."
raise NotImplementedError<|docstring|>Given a sequence of strings and an iterator of chars, return True
if any of the strings would be a prefix of ''.join(chars); but
only consume chars up to the end of the match.<|endoftext|> |
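The body is intentionally left as raise NotImplementedError; it is the exercise prompt. One possible solution, sketched here (not the exercise's official answer), tracks which strings are still viable prefixes and stops pulling from the iterator as soon as a match completes or no candidates remain:

def search_literals(strings, chars):
    if any(s == '' for s in strings):
        return True                    # the empty string is a prefix of anything
    live = list(strings)               # candidates still matching so far
    for c in chars:                    # consume the iterator one char at a time
        live = [s[1:] for s in live if s[0] == c]
        if any(s == '' for s in live):
            return True                # some string just matched completely
        if not live:
            return False               # nothing can match; stop consuming
    return False

assert search_literals(['bc', 'abc'], iter('abcdef'))
assert not search_literals(['xyz'], iter('abc'))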
c641cf0e82c19ea9a1e30af1e307bb646f5e4bbe03110af57db10326eef7db1d | def split_dataset(dataset: pd.DataFrame) -> Tuple[(pd.DataFrame, pd.DataFrame)]:
'Split dataset into training and validation datasets, based on the "train" column'
training = dataset[dataset['train']]
validation = dataset[(~ dataset['train'])]
return (training, validation) | Split dataset into training and validation datasets, based on the "train" column | kitt/dataset.py | split_dataset | David-Ciz/kitt | 2 | python | def split_dataset(dataset: pd.DataFrame) -> Tuple[(pd.DataFrame, pd.DataFrame)]:
training = dataset[dataset['train']]
validation = dataset[(~ dataset['train'])]
return (training, validation) | def split_dataset(dataset: pd.DataFrame) -> Tuple[(pd.DataFrame, pd.DataFrame)]:
training = dataset[dataset['train']]
validation = dataset[(~ dataset['train'])]
return (training, validation)<|docstring|>Split dataset into training and validation datasets, based on the "train" column<|endoftext|> |
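With split_dataset defined as above, a quick usage check on a toy frame:

import pandas as pd

df = pd.DataFrame({'image': ['a.png', 'b.png', 'c.png'],
                   'train': [True, False, True]})
training, validation = split_dataset(df)
assert list(training['image']) == ['a.png', 'c.png']
assert list(validation['image']) == ['b.png']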
ebe8417f117d198fd7c37ab63a0a5153b5af5f7d6ed6f3ef5b6d32112d489def | def edit_gui_py(content, html_id):
'Function that will change some element in html GUI and is callable from other py scripts.\n\n Args:\n content (str): New content.\n html_id (str): Id of changed element.\n '
import eel
eel.edit_gui_js(content, html_id) | Function that will change some element in html GUI and is callable from other py scripts.
Args:
content (str): New content.
html_id (str): Id of changed element. | predictit/gui_start.py | edit_gui_py | Malachov/predict-it | 7 | python | def edit_gui_py(content, html_id):
'Function that will change some element in html GUI and is callable from other py scripts.\n\n Args:\n content (str): New content.\n html_id (str): Id of changed element.\n '
import eel
eel.edit_gui_js(content, html_id) | def edit_gui_py(content, html_id):
'Function that will change some element in html GUI and is callable from other py scripts.\n\n Args:\n content (str): New content.\n html_id (str): Id of changed element.\n '
import eel
eel.edit_gui_js(content, html_id)<|docstring|>Function that will change some element in html GUI and is callable from other py scripts.
Args:
content (str): New content.
html_id (str): Id of changed element.<|endoftext|> |
574d41598c9060ec5fbd57d611a679b79c75f59401ddc20ec85500f8294c7cc9 | def run_gui():
'Start web based GUI.'
import eel
web_path = str((Path(__file__).resolve().parents[0] / 'files_for_GUI'))
eel.init(web_path)
this_path = Path(__file__).resolve().parents[1]
this_path_string = str(this_path)
sys.path.insert(0, this_path_string)
config = predictit.config
predictit.misc.GLOBAL_VARS.GUI = 1
config.update({'show_plot': False, 'save_plot': False, 'data': None, 'table_settings': {'tablefmt': 'html', 'floatfmt': '.3f', 'numalign': 'center', 'stralign': 'center'}})
@eel.expose
def make_predictions(configured):
'Function that from web GUI button trigger the predictit main predict function and return results on GUI.\n\n Args:\n configured (dict): Some configuration values can be configured in GUI.\n '
config.update(mypythontools.misc.json_to_py(configured))
eel.edit_gui_js('Setup finished', 'progress_phase')
try:
results = predictit.main.predict()
div = results.plot
if config.print_result_details:
eel.add_HTML_element(str(results.best), True, 'content', 'best_result', 'Best result')
eel.add_HTML_element(div, False, 'content', 'ploted_results', 'Interactive plot', ['plot'])
if config.print_table:
eel.add_HTML_element(results.tables.detailed_results, False, 'content', 'models_table', 'Models results', 'table')
eel.execute('ploted_results')
eel.add_delete_button('content')
eel.add_HTML_element(results.tables.time, False, 'content', 'time_parts_table', 'Time schema of prediction', 'table')
eel.add_HTML_element(results.output, True, 'content', 'printed_output', 'Everything printed', 'pre-wrapped')
except Exception:
eel.add_HTML_element(f'''
Error in making predictions - {traceback.format_exc()}
''', True, 'progress_phase', 'error-log', 'Error log', 'pre-wrapped')
eel.start('index.html', port=0) | Start web based GUI. | predictit/gui_start.py | run_gui | Malachov/predict-it | 7 | python | def run_gui():
import eel
web_path = str((Path(__file__).resolve().parents[0] / 'files_for_GUI'))
eel.init(web_path)
this_path = Path(__file__).resolve().parents[1]
this_path_string = str(this_path)
sys.path.insert(0, this_path_string)
config = predictit.config
predictit.misc.GLOBAL_VARS.GUI = 1
config.update({'show_plot': False, 'save_plot': False, 'data': None, 'table_settings': {'tablefmt': 'html', 'floatfmt': '.3f', 'numalign': 'center', 'stralign': 'center'}})
@eel.expose
def make_predictions(configured):
'Function that from web GUI button trigger the predictit main predict function and return results on GUI.\n\n Args:\n configured (dict): Some configuration values can be configured in GUI.\n '
config.update(mypythontools.misc.json_to_py(configured))
eel.edit_gui_js('Setup finished', 'progress_phase')
try:
results = predictit.main.predict()
div = results.plot
if config.print_result_details:
eel.add_HTML_element(str(results.best), True, 'content', 'best_result', 'Best result')
eel.add_HTML_element(div, False, 'content', 'ploted_results', 'Interactive plot', ['plot'])
if config.print_table:
eel.add_HTML_element(results.tables.detailed_results, False, 'content', 'models_table', 'Models results', 'table')
eel.execute('ploted_results')
eel.add_delete_button('content')
eel.add_HTML_element(results.tables.time, False, 'content', 'time_parts_table', 'Time schema of prediction', 'table')
eel.add_HTML_element(results.output, True, 'content', 'printed_output', 'Everything printed', 'pre-wrapped')
except Exception:
            eel.add_HTML_element(f'''
Error in making predictions - {traceback.format_exc()}
            ''', True, 'progress_phase', 'error-log', 'Error log', 'pre-wrapped')
eel.start('index.html', port=0) | def run_gui():
import eel
web_path = str((Path(__file__).resolve().parents[0] / 'files_for_GUI'))
eel.init(web_path)
this_path = Path(__file__).resolve().parents[1]
this_path_string = str(this_path)
sys.path.insert(0, this_path_string)
config = predictit.config
predictit.misc.GLOBAL_VARS.GUI = 1
config.update({'show_plot': False, 'save_plot': False, 'data': None, 'table_settings': {'tablefmt': 'html', 'floatfmt': '.3f', 'numalign': 'center', 'stralign': 'center'}})
@eel.expose
def make_predictions(configured):
'Function that from web GUI button trigger the predictit main predict function and return results on GUI.\n\n Args:\n configured (dict): Some configuration values can be configured in GUI.\n '
config.update(mypythontools.misc.json_to_py(configured))
eel.edit_gui_js('Setup finished', 'progress_phase')
try:
results = predictit.main.predict()
div = results.plot
if config.print_result_details:
eel.add_HTML_element(str(results.best), True, 'content', 'best_result', 'Best result')
eel.add_HTML_element(div, False, 'content', 'ploted_results', 'Interactive plot', ['plot'])
if config.print_table:
eel.add_HTML_element(results.tables.detailed_results, False, 'content', 'models_table', 'Models results', 'table')
eel.execute('ploted_results')
eel.add_delete_button('content')
eel.add_HTML_element(results.tables.time, False, 'content', 'time_parts_table', 'Time schema of prediction', 'table')
eel.add_HTML_element(results.output, True, 'content', 'printed_output', 'Everything printed', 'pre-wrapped')
except Exception:
            eel.add_HTML_element(f'''
Error in making predictions - {traceback.format_exc()}
            ''', True, 'progress_phase', 'error-log', 'Error log', 'pre-wrapped')
eel.start('index.html', port=0)<|docstring|>Start web based GUI.<|endoftext|> |
1bf499c7e894679510eaa6f8715e021d34751bd22c7867cce10ef805e1fe8173 | @eel.expose
def make_predictions(configured):
'Function that from web GUI button trigger the predictit main predict function and return results on GUI.\n\n Args:\n configured (dict): Some configuration values can be configured in GUI.\n '
config.update(mypythontools.misc.json_to_py(configured))
eel.edit_gui_js('Setup finished', 'progress_phase')
try:
results = predictit.main.predict()
div = results.plot
if config.print_result_details:
eel.add_HTML_element(str(results.best), True, 'content', 'best_result', 'Best result')
eel.add_HTML_element(div, False, 'content', 'ploted_results', 'Interactive plot', ['plot'])
if config.print_table:
eel.add_HTML_element(results.tables.detailed_results, False, 'content', 'models_table', 'Models results', 'table')
eel.execute('ploted_results')
eel.add_delete_button('content')
eel.add_HTML_element(results.tables.time, False, 'content', 'time_parts_table', 'Time schema of prediction', 'table')
eel.add_HTML_element(results.output, True, 'content', 'printed_output', 'Everything printed', 'pre-wrapped')
except Exception:
eel.add_HTML_element(f'''
Error in making predictions - {traceback.format_exc()}
''', True, 'progress_phase', 'error-log', 'Error log', 'pre-wrapped') | Function that from web GUI button trigger the predictit main predict function and return results on GUI.
Args:
configured (dict): Some configuration values can be configured in GUI. | predictit/gui_start.py | make_predictions | Malachov/predict-it | 7 | python | @eel.expose
def make_predictions(configured):
'Function that from web GUI button trigger the predictit main predict function and return results on GUI.\n\n Args:\n configured (dict): Some configuration values can be configured in GUI.\n '
config.update(mypythontools.misc.json_to_py(configured))
eel.edit_gui_js('Setup finished', 'progress_phase')
try:
results = predictit.main.predict()
div = results.plot
if config.print_result_details:
eel.add_HTML_element(str(results.best), True, 'content', 'best_result', 'Best result')
eel.add_HTML_element(div, False, 'content', 'ploted_results', 'Interactive plot', ['plot'])
if config.print_table:
eel.add_HTML_element(results.tables.detailed_results, False, 'content', 'models_table', 'Models results', 'table')
eel.execute('ploted_results')
eel.add_delete_button('content')
eel.add_HTML_element(results.tables.time, False, 'content', 'time_parts_table', 'Time schema of prediction', 'table')
eel.add_HTML_element(results.output, True, 'content', 'printed_output', 'Everything printed', 'pre-wrapped')
except Exception:
        eel.add_HTML_element(f'''
Error in making predictions - {traceback.format_exc()}
''', True, 'progress_phase', 'error-log', 'Error log', 'pre-wrapped')
def make_predictions(configured):
'Function that from web GUI button trigger the predictit main predict function and return results on GUI.\n\n Args:\n configured (dict): Some configuration values can be configured in GUI.\n '
config.update(mypythontools.misc.json_to_py(configured))
eel.edit_gui_js('Setup finished', 'progress_phase')
try:
results = predictit.main.predict()
div = results.plot
if config.print_result_details:
eel.add_HTML_element(str(results.best), True, 'content', 'best_result', 'Best result')
eel.add_HTML_element(div, False, 'content', 'ploted_results', 'Interactive plot', ['plot'])
if config.print_table:
eel.add_HTML_element(results.tables.detailed_results, False, 'content', 'models_table', 'Models results', 'table')
eel.execute('ploted_results')
eel.add_delete_button('content')
eel.add_HTML_element(results.tables.time, False, 'content', 'time_parts_table', 'Time schema of prediction', 'table')
eel.add_HTML_element(results.output, True, 'content', 'printed_output', 'Everything printed', 'pre-wrapped')
except Exception:
        eel.add_HTML_element(f'''
Error in making predictions - {traceback.format_exc()}
''', True, 'progress_phase', 'error-log', 'Error log', 'pre-wrapped')
Args:
configured (dict): Some configuration values can be configured in GUI.<|endoftext|> |
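Stripped of the prediction logic, the eel wiring used above follows a small round-trip pattern: init a web folder, expose Python callables to JS, call page-defined JS functions (edit_gui_js, add_HTML_element) from Python, then start the app. A minimal sketch, assuming a web/index.html that defines and registers the matching JS function:

import eel

eel.init('web')                      # folder containing index.html

@eel.expose                          # callable from JS as eel.greet(name)
def greet(name):
    return f'Hello, {name}'

# Python -> JS calls like eel.edit_gui_js(content, html_id) work once the
# page has registered that function with eel.expose on the JS side.
eel.start('index.html', port=0)      # port=0 lets eel pick a free port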
45274a98ef2a43301a7787176ac3f02eec94bc5597238d2f9217347f46d178cb | def test_score_screw_axes_equivalent_axes():
'Test the score_screw_axes function (when equivalent axes present).'
laue_group_info = laue_groups['P m -3']
reflections = flex.reflection_table()
reflections['miller_index'] = flex.miller_index([(0, 1, 0), (0, 2, 0), (0, 0, 1), (0, 0, 2)])
reflections['intensity'] = flex.double([0.05, 100.0, 0.02, 100.0])
reflections['variance'] = flex.double([1.0, 1.0, 1.0, 1.0])
(axes, scores) = score_screw_axes(laue_group_info, reflections)
assert (len(scores) == 1)
assert (len(axes) == 1)
assert (axes[0].name == '21a')
assert (scores[0] > 0.99) | Test the score_screw_axes function (when equivalent axes present). | tests/algorithms/symmetry/absences/test_laue_group_info.py | test_score_screw_axes_equivalent_axes | toastisme/dials | 58 | python | def test_score_screw_axes_equivalent_axes():
laue_group_info = laue_groups['P m -3']
reflections = flex.reflection_table()
reflections['miller_index'] = flex.miller_index([(0, 1, 0), (0, 2, 0), (0, 0, 1), (0, 0, 2)])
reflections['intensity'] = flex.double([0.05, 100.0, 0.02, 100.0])
reflections['variance'] = flex.double([1.0, 1.0, 1.0, 1.0])
(axes, scores) = score_screw_axes(laue_group_info, reflections)
assert (len(scores) == 1)
assert (len(axes) == 1)
assert (axes[0].name == '21a')
assert (scores[0] > 0.99) | def test_score_screw_axes_equivalent_axes():
laue_group_info = laue_groups['P m -3']
reflections = flex.reflection_table()
reflections['miller_index'] = flex.miller_index([(0, 1, 0), (0, 2, 0), (0, 0, 1), (0, 0, 2)])
reflections['intensity'] = flex.double([0.05, 100.0, 0.02, 100.0])
reflections['variance'] = flex.double([1.0, 1.0, 1.0, 1.0])
(axes, scores) = score_screw_axes(laue_group_info, reflections)
assert (len(scores) == 1)
assert (len(axes) == 1)
assert (axes[0].name == '21a')
assert (scores[0] > 0.99)<|docstring|>Test the score_screw_axes function (when equivalent axes present).<|endoftext|> |
1ce0facb11e4596c96ac3e8e4d37652d90503905647e6dfc4b7dc236e3212226 | def test_score_space_group():
'Test scoring of space groups by combining axis scores.'
laue_group = laue_groups['P 1 2/m 1']
axis_scores = [0.98]
(space_groups, scores) = score_space_groups(axis_scores, laue_group)
for (sg, score) in zip(space_groups, scores):
if (sg == 'P 21'):
assert (score == pytest.approx(0.98))
elif (sg == 'P 2'):
assert (score == pytest.approx(0.02))
laue_group = laue_groups['P 4/m m m']
axis_scores = [0.95, 1.0, 0.95]
(space_groups, scores) = score_space_groups(axis_scores, laue_group)
for (sg, score) in zip(space_groups, scores):
if (sg == 'P 41 21 2'):
assert (score == pytest.approx(0.95))
elif (sg == 'P 42 21 2'):
assert (score == pytest.approx((0.05 * 0.95)))
elif (sg == 'P 4 21 2'):
assert (score == pytest.approx((0.05 * 0.05)))
else:
assert (score == pytest.approx(0.0))
laue_group = laue_groups['P 6/m']
axis_scores = [0.95, 0.9, 0.85]
(space_groups, scores) = score_space_groups(axis_scores, laue_group)
for (sg, score) in zip(space_groups, scores):
if (sg == 'P 61'):
assert (score == pytest.approx(0.95))
elif (sg == 'P 62'):
assert (score == pytest.approx((0.05 * 0.9)))
elif (sg == 'P 63'):
assert (score == pytest.approx((0.05 * 0.85)))
elif (sg == 'P 6'):
assert (score == pytest.approx(((0.05 * 0.1) * 0.15))) | Test scoring of space groups by combining axis scores. | tests/algorithms/symmetry/absences/test_laue_group_info.py | test_score_space_group | toastisme/dials | 58 | python | def test_score_space_group():
laue_group = laue_groups['P 1 2/m 1']
axis_scores = [0.98]
(space_groups, scores) = score_space_groups(axis_scores, laue_group)
for (sg, score) in zip(space_groups, scores):
if (sg == 'P 21'):
assert (score == pytest.approx(0.98))
elif (sg == 'P 2'):
assert (score == pytest.approx(0.02))
laue_group = laue_groups['P 4/m m m']
axis_scores = [0.95, 1.0, 0.95]
(space_groups, scores) = score_space_groups(axis_scores, laue_group)
for (sg, score) in zip(space_groups, scores):
if (sg == 'P 41 21 2'):
assert (score == pytest.approx(0.95))
elif (sg == 'P 42 21 2'):
assert (score == pytest.approx((0.05 * 0.95)))
elif (sg == 'P 4 21 2'):
assert (score == pytest.approx((0.05 * 0.05)))
else:
assert (score == pytest.approx(0.0))
laue_group = laue_groups['P 6/m']
axis_scores = [0.95, 0.9, 0.85]
(space_groups, scores) = score_space_groups(axis_scores, laue_group)
for (sg, score) in zip(space_groups, scores):
if (sg == 'P 61'):
assert (score == pytest.approx(0.95))
elif (sg == 'P 62'):
assert (score == pytest.approx((0.05 * 0.9)))
elif (sg == 'P 63'):
assert (score == pytest.approx((0.05 * 0.85)))
elif (sg == 'P 6'):
assert (score == pytest.approx(((0.05 * 0.1) * 0.15))) | def test_score_space_group():
laue_group = laue_groups['P 1 2/m 1']
axis_scores = [0.98]
(space_groups, scores) = score_space_groups(axis_scores, laue_group)
for (sg, score) in zip(space_groups, scores):
if (sg == 'P 21'):
assert (score == pytest.approx(0.98))
elif (sg == 'P 2'):
assert (score == pytest.approx(0.02))
laue_group = laue_groups['P 4/m m m']
axis_scores = [0.95, 1.0, 0.95]
(space_groups, scores) = score_space_groups(axis_scores, laue_group)
for (sg, score) in zip(space_groups, scores):
if (sg == 'P 41 21 2'):
assert (score == pytest.approx(0.95))
elif (sg == 'P 42 21 2'):
assert (score == pytest.approx((0.05 * 0.95)))
elif (sg == 'P 4 21 2'):
assert (score == pytest.approx((0.05 * 0.05)))
else:
assert (score == pytest.approx(0.0))
laue_group = laue_groups['P 6/m']
axis_scores = [0.95, 0.9, 0.85]
(space_groups, scores) = score_space_groups(axis_scores, laue_group)
for (sg, score) in zip(space_groups, scores):
if (sg == 'P 61'):
assert (score == pytest.approx(0.95))
elif (sg == 'P 62'):
assert (score == pytest.approx((0.05 * 0.9)))
elif (sg == 'P 63'):
assert (score == pytest.approx((0.05 * 0.85)))
elif (sg == 'P 6'):
assert (score == pytest.approx(((0.05 * 0.1) * 0.15)))<|docstring|>Test scoring of space groups by combining axis scores.<|endoftext|> |
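In the monoclinic case the combination rule this test asserts reduces to a two-way split on the single 21 axis score; the tetragonal and hexagonal cases then chain (1 - score) factors for the mutually exclusive screw axes. The degenerate case as a sketch (the real score_space_groups also handles the nested axes):

def score_monoclinic(p21_axis_score):
    # P 1 2/m 1 has one relevant screw axis, so the probability mass
    # splits between P 21 (axis present) and P 2 (axis absent).
    return {'P 21': p21_axis_score, 'P 2': 1.0 - p21_axis_score}

scores = score_monoclinic(0.98)
assert scores['P 21'] == 0.98
assert abs(scores['P 2'] - 0.02) < 1e-12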