def owner(*paths, **kwargs):
"""
.. versionadded:: 2014.7.0
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.aptpkg.version>`, if a
single path is passed, a string will be returned, and if multiple paths are
passed, a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Example:
.. code-block:: bash
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
"""
if not paths:
return ""
ret = {}
for path in paths:
cmd = ["dpkg", "-S", path]
output = __salt__["cmd.run_stdout"](
cmd, output_loglevel="trace", python_shell=False
)
ret[path] = output.split(":")[0]
if "no path found" in ret[path].lower():
ret[path] = ""
if len(ret) == 1:
return next(iter(ret.values()))
return ret
# repo: saltstack/salt
def _add(ret, pkginfo):
name = pkginfo.pop("Package", None)
version = pkginfo.pop("Version", None)
if name is not None and version is not None:
ret.setdefault(name, {}).setdefault(version, {}).update(pkginfo)
# repo: saltstack/salt
def info_installed(*names, **kwargs):
"""
Return the information of the named package(s) installed on the system.
.. versionadded:: 2015.8.1
names
The names of the packages for which to return information.
failhard
Whether to throw an exception if none of the packages are installed.
Defaults to True.
.. versionadded:: 2016.11.3
CLI Example:
.. code-block:: bash
salt '*' pkg.info_installed <package1>
salt '*' pkg.info_installed <package1> <package2> <package3> ...
salt '*' pkg.info_installed <package1> failhard=false
"""
kwargs = salt.utils.args.clean_kwargs(**kwargs)
failhard = kwargs.pop("failhard", True)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
ret = dict()
for pkg_name, pkg_nfo in __salt__["lowpkg.info"](*names, failhard=failhard).items():
t_nfo = dict()
if pkg_nfo.get("status", "ii")[1] != "i":
continue # return only packages that are really installed
# Translate dpkg-specific keys to a common structure
for key, value in pkg_nfo.items():
if key == "package":
t_nfo["name"] = value
elif key == "origin":
t_nfo["vendor"] = value
elif key == "section":
t_nfo["group"] = value
elif key == "maintainer":
t_nfo["packager"] = value
elif key == "homepage":
t_nfo["url"] = value
elif key == "status":
continue # only installed pkgs are returned, no need for status
else:
t_nfo[key] = value
ret[pkg_name] = t_nfo
return ret
# repo: saltstack/salt
def list_downloaded(root=None, **kwargs):
"""
.. versionadded:: 3000
List prefetched packages downloaded by apt in the local disk.
root
operate on a different root directory.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_downloaded
"""
CACHE_DIR = "/var/cache/apt"
if root:
CACHE_DIR = os.path.join(root, os.path.relpath(CACHE_DIR, os.path.sep))
ret = {}
for root, dirnames, filenames in salt.utils.path.os_walk(CACHE_DIR):
for filename in fnmatch.filter(filenames, "*.deb"):
package_path = os.path.join(root, filename)
pkg_info = __salt__["lowpkg.bin_pkg_info"](package_path)
pkg_timestamp = int(os.path.getctime(package_path))
ret.setdefault(pkg_info["name"], {})[pkg_info["version"]] = {
"path": package_path,
"size": os.path.getsize(package_path),
"creation_date_time_t": pkg_timestamp,
"creation_date_time": datetime.datetime.utcfromtimestamp(
pkg_timestamp
).isoformat(),
}
return ret
# repo: saltstack/salt
def setUp(self):
self.test_dir = os.path.join(os.path.dirname(__file__), "test-data")
# repo: kubeflow/testing
def _split_camelcase(text):
return re.sub(r"(?<=\w)([A-Z])", r" \1", text)
# repo: googlefonts/gftools
def _unique_id(version, vendor_id, filename):
# Glyphsapp style 2.000;MYFO;Arsenal-Bold
# version;vendorID;filename
return '%s;%s;%s' % (version, vendor_id, filename)
# repo: googlefonts/gftools
def _full_name(family_name, style_name):
style_name = _mac_subfamily_name(style_name)
full_name = '%s %s' % (family_name, style_name)
return full_name
# repo: googlefonts/gftools
def _win_subfamily_name(style_name):
name = style_name
if 'BoldItalic' == name:
return 'Bold Italic'
elif 'Italic' in name:
return 'Italic'
elif name == 'Bold':
return 'Bold'
else:
return 'Regular'
# repo: googlefonts/gftools
def set_macStyle(style_name):
return MACSTYLE[style_name]
# repo: googlefonts/gftools
def nametable_from_filename(filepath):
"""Generate a new nametable based on a ttf and the GF Spec"""
font = TTFont(filepath)
old_table = font['name']
new_table = newTable('name')
filename = ntpath.basename(filepath)[:-4]
family_name, style_name = filename.split('-')
family_name = _split_camelcase(family_name)
font_version = font['name'].getName(5, 3, 1, 1033)
font_version = font_version.toUnicode()
vendor_id = font['OS/2'].achVendID
# SET MAC NAME FIELDS
# -------------------
# Copyright
old_cp = old_table.getName(0, 3, 1, 1033).string.decode('utf_16_be')
new_table.setName(old_cp.encode('mac_roman'), 0, 1, 0, 0)
# Font Family Name
new_table.setName(family_name.encode('mac_roman'), 1, 1, 0, 0)
# Subfamily name
mac_subfamily_name = _mac_subfamily_name(style_name).encode('mac_roman')
new_table.setName(mac_subfamily_name, 2, 1, 0, 0)
# Unique ID
unique_id = _unique_id(_version(font_version), vendor_id, filename)
mac_unique_id = unique_id.encode('mac_roman')
new_table.setName(mac_unique_id, 3, 1, 0, 0)
# Full name
fullname = _full_name(family_name, style_name)
mac_fullname = fullname.encode('mac_roman')
new_table.setName(mac_fullname, 4, 1, 0, 0)
# Version string
old_v = old_table.getName(5, 3, 1, 1033).string.decode('utf_16_be')
mac_old_v = old_v.encode('mac_roman')
new_table.setName(mac_old_v, 5, 1, 0, 0)
# Postscript name
mac_ps_name = filename.encode('mac_roman')
new_table.setName(mac_ps_name, 6, 1, 0, 0)
# SET WIN NAME FIELDS
# -------------------
# Copyright
new_table.setName(old_cp, 0, 3, 1, 1033)
# Font Family Name
win_family_name = _win_family_name(family_name, style_name)
win_family_name = win_family_name.encode('utf_16_be')
new_table.setName(win_family_name, 1, 3, 1, 1033)
# Subfamily Name
win_subfamily_name = _win_subfamily_name(style_name).encode('utf_16_be')
new_table.setName(win_subfamily_name, 2, 3, 1, 1033)
# Unique ID
win_unique_id = unique_id.encode('utf_16_be')
new_table.setName(win_unique_id, 3, 3, 1, 1033)
# Full name
win_fullname = fullname.encode('utf_16_be')
new_table.setName(win_fullname, 4, 3, 1, 1033)
# Version string
win_old_v = old_v.encode('utf_16_be')
new_table.setName(win_old_v, 5, 3, 1, 1033)
# Postscript name
win_ps_name = filename.encode('utf_16_be')
new_table.setName(win_ps_name, 6, 3, 1, 1033)
if style_name not in WIN_SAFE_STYLES:
# Preferred Family Name
new_table.setName(family_name.encode('utf_16_be'), 16, 3, 1, 1033)
# Preferred SubfamilyName
win_pref_subfam_name = _mac_subfamily_name(style_name).encode('utf_16_be')
new_table.setName(win_pref_subfam_name, 17, 3, 1, 1033)
# PAD missing fields
# ------------------
for field in REQUIRED_FIELDS:
text = None
if new_table.getName(*field):
pass # Name has already been updated
elif old_table.getName(*field):
text = old_table.getName(*field).string
elif old_table.getName(field[0], 3, 1, 1033):
text = old_table.getName(field[0], 3, 1, 1033).string.decode('utf_16_be')
elif old_table.getName(field[0], 1, 0, 0): # check if field exists for mac
text = old_table.getName(field[0], 1, 0, 0).string.decode('mac_roman')
if text:
new_table.setName(text, *field)
return new_table
# repo: googlefonts/gftools
def main():
args = parser.parse_args()
for font_path in args.fonts:
nametable = nametable_from_filename(font_path)
font = TTFont(font_path)
font_filename = ntpath.basename(font_path)
font['name'] = nametable
style = font_filename[:-4].split('-')[-1]
font['OS/2'].usWeightClass = set_usWeightClass(style)
font['OS/2'].fsSelection = set_fsSelection(font['OS/2'].fsSelection, style)
win_style = font['name'].getName(2, 3, 1, 1033).string.decode('utf_16_be')
font['head'].macStyle = set_macStyle(win_style)
font.save(font_path + '.fix')
print('font saved %s.fix' % font_path)
# repo: googlefonts/gftools
def add_aroma(self):
root = BoxLayout(orientation='vertical', spacing=20)
btn_layout = GridLayout(cols=2, row_force_default=True, row_default_height=50, spacing=25)
add = Button(text='Add', size_hint_x=None, width=150)
cancel = Button(text='Cancel', size_hint_x=None, width=150)
root.add_widget(Label(text='Are You Sure You Want To\nAdd Aroma Installer?'))
root.add_widget(btn_layout)
btn_layout.add_widget(add)
btn_layout.add_widget(cancel)
popup = Popup(background='atlas://images/eds/pop', title='Add Aroma',content=root, auto_dismiss=False,
size_hint=(None, None), size=(350, 200))
cancel.bind(on_release=popup.dismiss)
popup.open()
# repo: wes342/EasyDevStudio
def callback(instance):
try:
pathtofile = '%s/Aroma/Aroma.zip' % (Tools)
destpath = '%s/META-INF/com/google/android' % (Rom)
z = zipfile.ZipFile(pathtofile)
z.extractall(destpath)
f = open(UScript)
text = f.read()
f.close()
f = open(UScript, 'w')
f.write('''ui_print("");
# repo: wes342/EasyDevStudio (snippet truncated in source)
def aroma(self):
self.panel_layout.clear_widgets()
title = Label(text='[b][color=#22A0D6][size=20]Custom Aroma Configuration[/size][/color][/b]', markup = True, pos_hint={'x':-.05, 'y':.20})
name_lbl = Label(text='[b][color=ffffff][size=12]Set Rom Name :[/size][/color][/b]', markup = True, pos_hint={'x':-.30, 'y':.08})
ver_lbl = Label(text='[b][color=ffffff][size=12]Rom Version Number :[/size][/color][/b]', markup = True, pos_hint={'x':-.30, 'y':-.02})
dev_lbl = Label(text='[b][color=ffffff][size=12]Developer Name :[/size][/color][/b]', markup = True, pos_hint={'x':-.30, 'y':-.12})
name = TextInput(text='', multiline=False, pos_hint={'x':.400, 'y':.550}, size_hint=(.50, .05))
ver = TextInput(text='', multiline=False, pos_hint={'x':.400, 'y':.450}, size_hint=(.50, .05))
dev = TextInput(text='', multiline=False, pos_hint={'x':.400, 'y':.350}, size_hint=(.50, .05))
self.panel_layout.add_widget(title)
self.panel_layout.add_widget(name_lbl)
self.panel_layout.add_widget(name)
self.panel_layout.add_widget(ver_lbl)
self.panel_layout.add_widget(ver)
self.panel_layout.add_widget(dev_lbl)
self.panel_layout.add_widget(dev)
# repo: wes342/EasyDevStudio
def name_enter(self):
processing_change = False
for line in fileinput.input(Aroma, inplace=1):
if line.startswith('ini_set("rom_name", "'):
processing_change = True
else:
if processing_change:
print r'ini_set("rom_name", "' + name.text + r'");'
processing_change = False
print line,
# repo: wes342/EasyDevStudio
def ver_enter(self):
processing_change = False
# repo: wes342/EasyDevStudio (snippet truncated in source)
def dev_enter(self):
processing_change = False
for line in fileinput.input(Aroma, inplace=1):
if line.startswith(r'ini_set("rom_author", "'):
processing_change = True
else:
if processing_change:
print 'ini_set("rom_author", "' + dev.text + '");'
processing_change = False
print line,
for line in fileinput.input(Terms, inplace=1):
if line.startswith(r'Developer: '):
processing_change = True
else:
if processing_change:
print r'Developer: ' + dev.text + r''
processing_change = False
print line,
for line in fileinput.input(BuildProp, inplace=1):
if line.startswith(r'ro.build.host='):
processing_change = True
else:
if processing_change:
print r'ro.build.host=' + dev.text + r''
processing_change = False
print line,
# repo: wes342/EasyDevStudio
def dev_name(self):
fin = open(EdsIni)
for line in fin:
if line.startswith("uname ="):
l = line[7:-1].rstrip('\r\n')
processing_change=False
for line in fileinput.input(Aroma, inplace=1):
if line.startswith(r'ini_set("rom_author", "'):
processing_change = True
else:
if processing_change:
print 'ini_set("rom_author", "' + l + '");'
processing_change = False
print line,
for line in fileinput.input(Terms, inplace=1):
if line.startswith(r'Developer: '):
processing_change = True
else:
if processing_change:
print r'Developer: ' + l + r''
processing_change = False
print line,
# repo: wes342/EasyDevStudio
def device(self):
fin = open(BuildProp)
for line in fin:
if line.startswith("ro.product.device="):
l = line[18:-1].rstrip('\r\n')
processing_change=False
for line in fileinput.input(Aroma, inplace=1):
if line.startswith(r'ini_set("rom_device", "'):
processing_change = True
else:
if processing_change:
print r'ini_set("rom_device", "' + l + '");'
processing_change = False
print line,
# repo: wes342/EasyDevStudio
def today(self):
today = datetime.date.today()
processing_change = False
for line in fileinput.input(Aroma, inplace=1):
if line.startswith(r' "\t\tUPDATED\t: <#080>'):
processing_change = True
else:
if processing_change:
print r' "\t\tUPDATED\t: <#080>' + today.strftime("%Y/%m/%d") +r'</#>\n\n\n"+'
processing_change = False
print line,
# repo: wes342/EasyDevStudio
def boot_img(self):
self.panel_layout.clear_widgets()
title = Label(text='[b][color=#22A0D6][size=20]Boot.img Tools[/size][/color][/b]', markup = True, pos_hint={'x':-.05, 'y':.20})
unpack = CustomButton(text='Unpack Boot.img', pos_hint={'x':.05, 'y':.550}, size_hint=(.40, .06))
build = CustomButton(text='Build Boot.img', pos_hint={'x':.50, 'y':.550}, size_hint=(.40, .06))
self.panel_layout.add_widget(title)
self.panel_layout.add_widget(unpack)
self.panel_layout.add_widget(build)
# repo: wes342/EasyDevStudio
def deodex(self):
self.panel_layout.clear_widgets()
deodex_lbl = Label(text='[b][color=#22A0D6][size=20]Deodex Options[/size][/color][/b]', markup = True, pos_hint={'x':-.05, 'y':.20})
self.panel_layout.add_widget(deodex_lbl)
# repo: wes342/EasyDevStudio
def odex(self):
self.panel_layout.clear_widgets()
push_layout = GridLayout(cols=2, row_force_default=True, row_default_height=40, spacing=10, pos_hint={'x':-.02, 'y':-.525})
shell_layout = GridLayout(cols=2, row_force_default=True, row_default_height=40, spacing=10, pos_hint={'x':-.02, 'y':-.825})
odex_lbl = Label(text='[b][color=#22A0D6][size=20]Odex Options[/size][/color][/b]', markup = True, pos_hint={'x':-.0, 'y':.20})
easy = CustomButton(text='One Step Odex', pos_hint={'x':.08, 'y':.575}, size_hint=(.80, .08))
remount = CustomButton(text='1. Remount', pos_hint={'x':.28, 'y':.500}, size_hint=(.40, .06))
pdexo = CustomButton(text='2. Push dexo', size_hint=(.40, .06))
pdexopt = CustomButton(text='3. Push dexopt-wrapper ', size_hint=(.40, .06))
pzip = CustomButton(text='4. Push zip', size_hint=(.40, .06))
pzipalign = CustomButton(text='5. Push zipalign', size_hint=(.40, .06))
pbusybox = CustomButton(text='6. Push busybox', size_hint=(.40, .06))
perms = CustomButton(text='7. Set Permissions to 755', pos_hint={'x':.25, 'y':.225}, size_hint=(.40, .06))
sbusybox = CustomButton(text='8. Install busybox', size_hint=(.40, .06))
sdexo = CustomButton(text='9. Run dexo', size_hint=(.40, .06))
frame = CustomButton(text='10. Pull /Framework', size_hint=(.40, .06))
app = CustomButton(text='11. Pull /App', size_hint=(.40, .06))
self.panel_layout.add_widget(odex_lbl)
self.panel_layout.add_widget(push_layout)
self.panel_layout.add_widget(shell_layout)
self.panel_layout.add_widget(easy)
self.panel_layout.add_widget(remount)
push_layout.add_widget(pdexo)
push_layout.add_widget(pdexopt)
push_layout.add_widget(pzip)
push_layout.add_widget(pzipalign)
push_layout.add_widget(pbusybox)
push_layout.add_widget(perms)
shell_layout.add_widget(sbusybox)
shell_layout.add_widget(sdexo)
shell_layout.add_widget(frame)
shell_layout.add_widget(app)
# repo: wes342/EasyDevStudio
def odex_now(self):
os.chdir(Tools)
comm = "./adb remount"
output = os.popen(comm).read()
print output
comm = "./adb push " + Odex + '/dexo ' + '/system/bin'
output = os.popen(comm).read()
print output
comm = "./adb push " + Odex + '/dexopt-wrapper ' + '/system/bin'
output = os.popen(comm).read()
print output
comm = "./adb push " + Odex + '/zip' + '/system/xbin'
output = os.popen(comm).read()
print output
comm = "./adb push " + Odex + '/zipalign' + '/system/xbin'
output = os.popen(comm).read()
print output
comm = "./adb push " + Odex + '/busybox ' + '/system/xbin'
output = os.popen(comm).read()
print output
comm = "./adb shell chmod 755 /system/bin/dexo /system/bin/dexopt-wrapper /system/xbin/zip /system/xbin/zipalign /system/xbin/busybox"
output = os.popen(comm).read()
print output
comm = "./adb shell busybox --install /system/xbin"
output = os.popen(comm).read()
print output
comm = "./adb shell dexo"
output = os.popen(comm).read()
print output
os.chdir(Home)
os.mkdir('%s/Desktop/framework' % Home)
os.chdir('%s/Desktop/framework' % Home)
os.chdir(Tools)
comm = "./adb pull /system/framework %s/framework" % Home
output = os.popen(comm).read()
print output
os.chdir(Home)
os.mkdir('%s/Desktop/app' % Home)
os.chdir('%s/Desktop/app' % Home)
os.chdir(Tools)
comm = "./adb pull /system/app %s/app" % Home
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def sys_remount(self):
os.chdir(Tools)
comm = "./adb remount"
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def push_dexo(self):
os.chdir(Tools)
comm = "./adb push " + Odex + '/dexo ' + '/system/bin'
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def push_dexopt(self):
os.chdir(Tools)
comm = "./adb push " + Odex + '/dexopt-wrapper ' + '/system/bin'
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def push_zip(self):
os.chdir(Tools)
comm = "./adb push " + Odex + '/zip' + '/system/xbin'
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def push_zipalign(self):
os.chdir(Tools)
comm = "./adb push " + Odex + '/zipalign' + '/system/xbin'
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def push_busybox(self):
os.chdir(Tools)
comm = "./adb push " + Odex + '/busybox ' + '/system/xbin'
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def change_perms(self):
os.chdir(Tools)
comm = "./adb shell chmod 755 /system/bin/dexo /system/bin/dexopt-wrapper /system/xbin/zip /system/xbin/zipalign /system/xbin/busybox"
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def ins_busybox(self):
os.chdir(Tools)
comm = "./adb shell busybox --install /system/xbin"
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def run_dexo(self):
os.chdir(Tools)
comm = "./adb shell dexo"
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def pull_frame(self):
os.chdir(Home)
os.mkdir('%s/Desktop/framework' % Home)
os.chdir('%s/Desktop/framework' % Home)
os.chdir(Tools)
comm = "./adb pull /system/framework %s/Desktop/framework" % Home
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def pull_app(self):
os.chdir(Home)
os.mkdir('%s/Desktop/app' % Home)
os.chdir('%s/Desktop/app' % Home)
os.chdir(Tools)
comm = "./adb pull /system/app %s/Desktop/app" % Home
output = os.popen(comm).read()
print output
# repo: wes342/EasyDevStudio
def manager_and_vm(request, ssh_key, module_tmpdir, test_config,
logger):
hosts = Hosts(ssh_key, module_tmpdir, test_config, logger, request, 2)
hosts.instances[0] = VM('master', test_config)
hosts.instances[1] = VM('centos_7', test_config)
manager, vm = hosts.instances
passed = True
try:
hosts.create()
yield hosts.instances
except Exception:
passed = False
raise
finally:
hosts.destroy(passed=passed)
# repo: cloudify-cosmo/cloudify-system-tests
def example(manager_and_vm, ssh_key, tmpdir, logger, test_config):
manager, vm = manager_and_vm
example = get_example_deployment(
manager, ssh_key, logger, 'inplace_restore', test_config, vm)
try:
yield example
finally:
if example.installed:
example.uninstall()
# repo: cloudify-cosmo/cloudify-system-tests
def load_mnist_dataset(shape=(-1,784), path="data/mnist/"):
"""Automatically download MNIST dataset
and return the training, validation and test set with 50000, 10000 and 10000
digit images respectively.
Parameters
----------
shape : tuple
The shape of digit images, defaults to (-1,784)
path : string
Path to download data to, defaults to data/mnist/
Examples
--------
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784))
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
"""
# We first define functions for loading MNIST images and labels.
# For convenience, they also download the requested files if needed.
def load_mnist_images(path, filename):
filepath = maybe_download_and_extract(filename, path, 'http://yann.lecun.com/exdb/mnist/')
print(filepath)
# Read the inputs in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(shape)
# The inputs come as bytes, we convert them to float32 in range [0,1].
# (Actually to range [0, 255/256], for compatibility to the version
# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
return data / np.float32(256)
def load_mnist_labels(path, filename):
filepath = maybe_download_and_extract(filename, path, 'http://yann.lecun.com/exdb/mnist/')
# Read the labels in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
# Download and read the training and test set images and labels.
print("Load or Download MNIST > {}".format(path))
X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz')
y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz')
X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# We just return all the arrays in order, as expected in main().
# (It doesn't matter how we do this as long as we can read them again.)
X_train = np.asarray(X_train, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32)
X_val = np.asarray(X_val, dtype=np.float32)
y_val = np.asarray(y_val, dtype=np.int32)
X_test = np.asarray(X_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.int32)
return X_train, y_train, X_val, y_val, X_test, y_test
# repo: zjuela/LapSRN-tensorflow
def unpickle(file):
fp = open(file, 'rb')
if sys.version_info.major == 2:
data = pickle.load(fp)
elif sys.version_info.major == 3:
data = pickle.load(fp, encoding='latin-1')
fp.close()
return data
# repo: zjuela/LapSRN-tensorflow
def load_ptb_dataset(path='data/ptb/'):
"""Penn TreeBank (PTB) dataset is used in many LANGUAGE MODELING papers,
including "Empirical Evaluation and Combination of Advanced Language
Modeling Techniques", "Recurrent Neural Network Regularization".
It consists of 929k training words, 73k validation words, and 82k test
words. It has 10k words in its vocabulary.
In "Recurrent Neural Network Regularization", they trained regularized LSTMs
of two sizes; these are denoted the medium LSTM and large LSTM. Both LSTMs
have two layers and are unrolled for 35 steps. They initialize the hidden
states to zero. They then use the final hidden states of the current
minibatch as the initial hidden state of the subsequent minibatch
(successive minibatches sequentially traverse the training set).
The size of each minibatch is 20.
The medium LSTM has 650 units per layer and its parameters are initialized
uniformly in [-0.05, 0.05]. They apply 50% dropout on the non-recurrent
connections. They train the LSTM for 39 epochs with a learning rate of 1,
and after 6 epochs they decrease it by a factor of 1.2 after each epoch.
They clip the norm of the gradients (normalized by minibatch size) at 5.
The large LSTM has 1500 units per layer and its parameters are initialized
uniformly in [-0.04, 0.04]. They apply 65% dropout on the non-recurrent
connections. They train the model for 55 epochs with a learning rate of 1;
after 14 epochs they start to reduce the learning rate by a factor of 1.15
after each epoch. They clip the norm of the gradients (normalized by
minibatch size) at 10.
Parameters
----------
path : string
Path to download data to, defaults to data/ptb/
Returns
--------
train_data, valid_data, test_data, vocabulary size
Examples
--------
>>> train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()
Code References
---------------
- ``tensorflow.models.rnn.ptb import reader``
Download Links
---------------
- `Manual download <http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz>`_
"""
print("Load or Download Penn TreeBank (PTB) dataset > {}".format(path))
# Maybe download and uncompress tar, or load existing files
filename = 'simple-examples.tgz'
url = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/'
maybe_download_and_extract(filename, path, url, extract=True)
data_path = os.path.join(path, 'simple-examples', 'data')
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
word_to_id = nlp.build_vocab(nlp.read_words(train_path))
train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)
valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)
test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)
vocabulary = len(word_to_id)
# print(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '<unk>', '<eos>']
# print(train_data) # ... 214, 5, 23, 1, 2]
# print(word_to_id) # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '<eos>': 2 ... }
# print(vocabulary) # 10000
# exit()
return train_data, valid_data, test_data, vocabulary
# repo: zjuela/LapSRN-tensorflow
def load_imdb_dataset(path='data/imdb/', nb_words=None, skip_top=0,
maxlen=None, test_split=0.2, seed=113,
start_char=1, oov_char=2, index_from=3):
"""Load IMDB dataset
Parameters
----------
path : string
Path to download data to, defaults to data/imdb/
Examples
--------
>>> X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(
... nb_words=20000, test_split=0.2)
>>> print('X_train.shape', X_train.shape)
... (20000,) [[1, 62, 74, ... 1033, 507, 27],[1, 60, 33, ... 13, 1053, 7]..]
>>> print('y_train.shape', y_train.shape)
... (20000,) [1 0 0 ..., 1 0 1]
References
-----------
- `Modified from keras. <https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py>`_
"""
filename = "imdb.pkl"
url = 'https://s3.amazonaws.com/text-datasets/'
maybe_download_and_extract(filename, path, url)
if filename.endswith(".gz"):
f = gzip.open(os.path.join(path, filename), 'rb')
else:
f = open(os.path.join(path, filename), 'rb')
X, labels = cPickle.load(f)
f.close()
np.random.seed(seed)
np.random.shuffle(X)
np.random.seed(seed)
np.random.shuffle(labels)
if start_char is not None:
X = [[start_char] + [w + index_from for w in x] for x in X]
elif index_from:
X = [[w + index_from for w in x] for x in X]
if maxlen:
new_X = []
new_labels = []
for x, y in zip(X, labels):
if len(x) < maxlen:
new_X.append(x)
new_labels.append(y)
X = new_X
labels = new_labels
if not X:
raise Exception('After filtering for sequences shorter than maxlen=' +
str(maxlen) + ', no sequence was kept. '
'Increase maxlen.')
if not nb_words:
nb_words = max([max(x) for x in X])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]
else:
nX = []
for x in X:
nx = []
for w in x:
if (w >= nb_words or w < skip_top):
nx.append(w)
nX.append(nx)
X = nX
X_train = np.array(X[:int(len(X) * (1 - test_split))])
y_train = np.array(labels[:int(len(X) * (1 - test_split))])
X_test = np.array(X[int(len(X) * (1 - test_split)):])
y_test = np.array(labels[int(len(X) * (1 - test_split)):])
return X_train, y_train, X_test, y_test
# repo: zjuela/LapSRN-tensorflow
def load_wmt_en_fr_dataset(path='data/wmt_en_fr/'):
"""It will download English-to-French translation data from the WMT'15
Website (10^9-French-English corpus), and the 2013 news test from
the same site as development set.
Returns the directories of training data and test data.
Parameters
----------
path : string
Path to download data to, defaults to data/wmt_en_fr/
References
----------
- Code modified from /tensorflow/models/rnn/translation/data_utils.py
Notes
-----
Usually, it will take a long time to download this dataset.
"""
# URLs for WMT data.
_WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/"
_WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/"
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path."""
print("Unpacking %s to %s" % (gz_path, new_path))
with gzip.open(gz_path, "rb") as gz_file:
with open(new_path, "wb") as new_file:
for line in gz_file:
new_file.write(line)
def get_wmt_enfr_train_set(path):
"""Download the WMT en-fr training corpus to directory unless it's there."""
filename = "training-giga-fren.tar"
maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True)
train_path = os.path.join(path, "giga-fren.release2.fixed")
gunzip_file(train_path + ".fr.gz", train_path + ".fr")
gunzip_file(train_path + ".en.gz", train_path + ".en")
return train_path
def get_wmt_enfr_dev_set(path):
"""Download the WMT en-fr training corpus to directory unless it's there."""
filename = "dev-v2.tgz"
dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False)
dev_name = "newstest2013"
dev_path = os.path.join(path, "newstest2013")
if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")):
print("Extracting tgz file %s" % dev_file)
with tarfile.open(dev_file, "r:gz") as dev_tar:
fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
fr_dev_file.name = dev_name + ".fr" # Extract without "dev/" prefix.
en_dev_file.name = dev_name + ".en"
dev_tar.extract(fr_dev_file, path)
dev_tar.extract(en_dev_file, path)
return dev_path
print("Load or Download WMT English-to-French translation > {}".format(path))
train_path = get_wmt_enfr_train_set(path)
dev_path = get_wmt_enfr_dev_set(path)
return train_path, dev_path
# repo: zjuela/LapSRN-tensorflow
def save_npz(save_list=[], name='model.npz', sess=None):
"""Input parameters and the file name, save parameters into .npz file. Use tl.utils.load_npz() to restore.
Parameters
----------
save_list : a list
Parameters want to be saved.
name : a string or None
The name of the .npz file.
sess : None or Session
Examples
--------
>>> tl.files.save_npz(network.all_params, name='model_test.npz', sess=sess)
... File saved to: model_test.npz
>>> load_params = tl.files.load_npz(name='model_test.npz')
... Loading param0, (784, 800)
... Loading param1, (800,)
... Loading param2, (800, 800)
... Loading param3, (800,)
... Loading param4, (800, 10)
... Loading param5, (10,)
>>> put parameters into a TensorLayer network, please see assign_params()
Notes
-----
If you run into session issues, you can change value.eval() to value.eval(session=sess)
References
----------
- `Saving dictionary using numpy <http://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez>`_
"""
## save params into a list
save_list_var = []
if sess:
save_list_var = sess.run(save_list)
else:
try:
for k, value in enumerate(save_list):
save_list_var.append(value.eval())
except:
print(" Fail to save model, Hint: pass the session into this function, save_npz(network.all_params, name='model.npz', sess=sess)")
np.savez(name, params=save_list_var)
save_list_var = None
del save_list_var
print("[*] %s saved" % name)
## save params into a dictionary
# rename_dict = {}
# for k, value in enumerate(save_dict):
# rename_dict.update({'param'+str(k) : value.eval()})
# np.savez(name, **rename_dict)
# print('Model is saved to: %s' % name)
# repo: zjuela/LapSRN-tensorflow
def load_npz(path='', name='model.npz'):
"""Load the parameters of a Model saved by tl.files.save_npz().
Parameters
----------
path : a string
Folder path to .npz file.
name : a string or None
The name of the .npz file.
Returns
--------
params : list
A list of parameters in order.
Examples
--------
- See save_npz and assign_params
References
----------
- `Saving dictionary using numpy <http://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez>`_
"""
## if save_npz save params into a dictionary
# d = np.load( path+name )
# params = []
# print('Load Model')
# for key, val in sorted( d.items() ):
# params.append(val)
# print('Loading %s, %s' % (key, str(val.shape)))
# return params
## if save_npz save params into a list
d = np.load( path+name )
# for val in sorted( d.items() ):
# params = val
# return params
return d['params']
# print(d.items()[0][1]['params'])
# exit()
# return d.items()[0][1]['params']
# repo: zjuela/LapSRN-tensorflow
def assign_params(sess, params, network):
"""Assign the given parameters to the TensorLayer network.
Parameters
----------
sess : TensorFlow Session. Automatically run when sess is not None.
params : a list
A list of parameters in order.
network : a :class:`Layer` class
The network to be assigned
Returns
--------
ops : list
A list of tf ops in order that assign params. Support sess.run(ops) manually.
Examples
--------
>>> Save your network as follows:
>>> tl.files.save_npz(network.all_params, name='model_test.npz')
>>> network.print_params()
...
... Next time, load and assign your network as follows:
>>> tl.layers.initialize_global_variables(sess)
>>> load_params = tl.files.load_npz(name='model_test.npz')
>>> tl.files.assign_params(sess, load_params, network)
>>> network.print_params()
References
----------
- `Assign value to a TensorFlow variable <http://stackoverflow.com/questions/34220532/how-to-assign-value-to-a-tensorflow-variable>`_
"""
ops = []
for idx, param in enumerate(params):
ops.append(network.all_params[idx].assign(param))
if sess is not None:
sess.run(ops)
return ops
# repo: zjuela/LapSRN-tensorflow
def save_any_to_npy(save_dict={}, name='file.npy'):
"""Save variables to .npy file.
Examples
---------
>>> tl.files.save_any_to_npy(save_dict={'data': ['a','b']}, name='test.npy')
>>> data = tl.files.load_npy_to_any(name='test.npy')
>>> print(data)
... {'data': ['a','b']}
"""
np.save(name, save_dict)
# repo: zjuela/LapSRN-tensorflow
def npz_to_W_pdf(path=None, regx='w1pre_[0-9]+\.(npz)'):
"""Convert the first weight matrix of .npz file to .pdf by using tl.visualize.W().
Parameters
----------
path : a string or None
A folder path to npz files.
regx : a string
Regx for the file name.
Examples
--------
>>> Convert the first weight matrix of w1_pre...npz file to w1_pre...pdf.
>>> tl.files.npz_to_W_pdf(path='/Users/.../npz_file/', regx='w1pre_[0-9]+\.(npz)')
"""
file_list = load_file_list(path=path, regx=regx)
for f in file_list:
W = load_npz(path, f)[0]
print("%s --> %s" % (f, f.split('.')[0]+'.pdf'))
visualize.W(W, second=10, saveable=True, name=f.split('.')[0], fig_idx=2012)
# repo: zjuela/LapSRN-tensorflow
def load_file_list(path=None, regx='\.npz', printable=True):
"""Return a file list in a folder by given a path and regular expression.
Parameters
----------
path : a string or None
A folder path.
regx : a string
The regx of file name.
printable : boolean, whether to print the files information.
Examples
----------
>>> file_list = tl.files.load_file_list(path=None, regx='w1pre_[0-9]+\.(npz)')
"""
if path == False:
path = os.getcwd()
file_list = os.listdir(path)
return_list = []
for idx, f in enumerate(file_list):
if re.search(regx, f):
return_list.append(f)
# return_list.sort()
if printable:
print('Match file list = %s' % return_list)
print('Number of files = %d' % len(return_list))
return return_list
# repo: zjuela/LapSRN-tensorflow
def exists_or_mkdir(path, verbose=True):
"""Check a folder by given name, if not exist, create the folder and return False,
if directory exists, return True.
Parameters
----------
path : a string
A folder path.
verbose : boolean
If True, prints results, defaults to True
Returns
--------
True if folder exist, otherwise, returns False and create the folder
Examples
--------
>>> tl.files.exists_or_mkdir("checkpoints/train")
"""
if not os.path.exists(path):
if verbose:
print("[*] creates %s ..." % path)
os.makedirs(path)
return False
else:
if verbose:
print("[!] %s exists ..." % path)
return True
# repo: zjuela/LapSRN-tensorflow
def _download(filename, working_directory, url_source):
def _dlProgress(count, blockSize, totalSize):
if(totalSize != 0):
percent = float(count * blockSize) / float(totalSize) * 100.0
sys.stdout.write("\r" "Downloading " + filename + "...%d%%" % percent)
sys.stdout.flush()
if sys.version_info[0] == 2:
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
filepath = os.path.join(working_directory, filename)
urlretrieve(url_source+filename, filepath, reporthook=_dlProgress)
# repo: zjuela/LapSRN-tensorflow
def __init__(self):
"""Object mapper starts off with empty value."""
self.value = None
self.seen = set()
# repo: GoogleCloudPlatform/appengine-config-transformer
def see(self, key):
if key in self.seen:
raise yaml_errors.DuplicateAttribute("Duplicate attribute '%s'." % key)
self.seen.add(key)
# repo: GoogleCloudPlatform/appengine-config-transformer
def __init__(self):
"""Object sequencer starts off with empty value."""
self.value = []
self.constructor = None
# repo: GoogleCloudPlatform/appengine-config-transformer
def __init__(self, default_class):
"""Initialize validated object builder.
Args:
default_class: Class that is instantiated upon the detection of a new
document. An instance of this class will act as the document itself.
"""
self.default_class = default_class
# repo: GoogleCloudPlatform/appengine-config-transformer
def BuildDocument(self):
"""Instantiate new root validated object.
Returns:
New instance of validated object.
"""
return self.default_class()
# repo: GoogleCloudPlatform/appengine-config-transformer
def EndMapping(self, top_value, mapping):
"""When leaving scope, makes sure new object is initialized.
This method is mainly for picking up on any missing required attributes.
Args:
top_value: Parent of closing mapping object.
mapping: _ObjectMapper instance that is leaving scope.
"""
# make sure that mapping.value is a non-built-in type (i.e. can have
# 'CheckInitialized' called on it)
if not hasattr(mapping.value, 'CheckInitialized'):
raise validation.ValidationError('Cannot convert map to non-map value.')
try:
mapping.value.CheckInitialized()
except validation.ValidationError:
# These should just pass through.
raise
except Exception, e:
# Some errors may have problematic encoding or other issues.
# Re-raising an error in this block would be very hard to debug
# for the time being so instead, on error, the value is merely
# obscured.
try:
error_str = str(e)
except Exception:
error_str = '<unknown>'
# Wrap in a ValidationError
raise validation.ValidationError(error_str, e)
# repo: GoogleCloudPlatform/appengine-config-transformer
def MapTo(self, subject, key, value):
"""Map key-value pair to an objects attribute.
Args:
subject: _ObjectMapper of object that will receive new attribute.
key: Key of attribute.
value: Value of new attribute.
Raises:
UnexpectedAttribute when the key is not a validated attribute of
the subject value class.
"""
assert isinstance(subject.value, validation.ValidatedBase)
try:
attribute = subject.value.GetValidator(key)
except validation.ValidationError, err:
raise yaml_errors.UnexpectedAttribute(err)
if isinstance(value, _ObjectMapper):
# Now know what class the new instance should be.
# Time to construct it from the attributes expected type.
value.set_value(attribute.expected_type())
value = value.value
elif isinstance(value, _ObjectSequencer):
# Now know what class new instances within the sequence should be.
value.set_constructor(self._GetRepeated(attribute))
value = value.value
subject.see(key)
try:
subject.value.Set(key, value)
except validation.ValidationError, e:
# Some errors may have problematic encoding or other issues.
# Re-raising an error in this block would be very hard to debug
# for the time being so instead, on error, the value is merely
# obscured.
try:
error_str = str(e)
except Exception:
error_str = '<unknown>'
try:
value_str = str(value)
except Exception:
value_str = '<unknown>'
# Update error message with a better message.
e.message = ("Unable to assign value '%s' to attribute '%s':\n%s" %
(value_str, key, error_str))
raise e
except Exception, e:
try:
error_str = str(e)
except Exception:
error_str = '<unknown>'
try:
value_str = str(value)
except Exception:
value_str = '<unknown>'
# Raise a more generic exception message.
message = ("Unable to assign value '%s' to attribute '%s':\n%s" %
(value_str, key, error_str))
raise validation.ValidationError(message, e)
# repo: GoogleCloudPlatform/appengine-config-transformer
def BuildObjects(default_class, stream, loader=yaml.loader.SafeLoader):
"""Build objects from stream.
Handles the basic case of loading all the objects from a stream.
Args:
default_class: Class that is instantiated upon the detection of a new
document. An instance of this class will act as the document itself.
stream: String document or open file object to process as per the
yaml.parse method. Any object that implements a 'read()' method which
returns a string document will work with the YAML parser.
loader: Used for dependency injection.
Returns:
List of default_class instances parsed from the stream.
"""
builder = ObjectBuilder(default_class)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(stream, loader)
return handler.GetResults()
# repo: GoogleCloudPlatform/appengine-config-transformer
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
# repo: 916253/Kurisu
def option():
return options.copy()
# repo: openweave/happy
def __init__(self, opts=options):
HappyNode.__init__(self)
self.quiet = opts["quiet"]
self.add = opts["add"]
self.delete = opts["delete"]
self.dns = opts["dns"]
self.node_id = opts["node_id"] | openweave/happy | [
41,
22,
41,
7,
1504117462
] |
def __add_node_dns(self, node_id):
nspath = self.nsroot + "/" + self.uniquePrefix(node_id)
resolv_path = nspath + "/" + "resolv.conf"
if not os.path.isdir(nspath):
cmd = "mkdir -p " + nspath
cmd = self.runAsRoot(cmd)
ret = self.CallAtHost(cmd)
if not os.path.exists(resolv_path):
cmd = "touch " + resolv_path
cmd = self.runAsRoot(cmd)
ret = self.CallAtHost(cmd)
cmd = "chmod 666 " + resolv_path
cmd = self.runAsRoot(cmd)
ret = self.CallAtHost(cmd)
with open(resolv_path, 'w') as res:
for dns_addr in self.dns:
line = "nameserver " + dns_addr + "\n"
res.write(line)
# repo: openweave/happy
def __update_nodes_dns(self):
if self.node_id:
nodes = [self.node_id]
else:
nodes = self.getNodeIds()
for node_id in nodes:
if self.add:
self.__add_node_dns(node_id)
else:
self.__remove_node_dns(node_id)
# repo: openweave/happy
def metric_fn(labels, logits):
"""Record metrics for evaluation."""
predictions = tf.argmax(logits, 1)
return {
"accuracy": tf.metrics.precision(labels=labels, predictions=predictions)
} | GoogleCloudPlatform/healthcare | [
332,
163,
332,
30,
1519345980
] |
def get_input_fn(filename):
"""Returns an `input_fn` for training and evaluation."""
def input_fn(params):
# Retrieves the batch size for the current shard. The number of shards is
# computed according to the input pipeline deployment. See
# https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/RunConfig
# for details.
batch_size = params["batch_size"]
def parse(serialized_example):
"""Parses a single tf.Example into image and label tensors."""
features = tf.parse_single_example(
serialized_example,
features={
"label": tf.FixedLenFeature([], tf.int64),
"image": tf.FixedLenFeature([], tf.string),
})
image = tf.decode_raw(features["image"], tf.float32)
image = tf.reshape(image, [FLAGS.image_width, FLAGS.image_height])
label = tf.cast(features["label"], tf.int32)
return image, label
dataset = tf.data.TFRecordDataset(filename, buffer_size=500000)
dataset = dataset.map(parse).cache().repeat()
dataset = dataset.batch(batch_size, drop_remainder=True)
images, labels = dataset.make_one_shot_iterator().get_next()
return images, labels
return input_fn
# repo: GoogleCloudPlatform/healthcare
def toFileName(text, max_length=1000):
return text.replace("/","_").replace("\\","_").replace("'","_").replace('"',"_").replace(".","_").replace(":","_").replace("__","_").replace(" ","_")[:max_length] | FirstDraftGIS/firstdraft | [
10,
1,
10,
14,
1441325314
] |
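The replacement chain runs strictly left to right; note that spaces are replaced after the double-underscore collapse, so adjacent separators can still leave "__" in the result (the input here is hypothetical):

print(toFileName("Rome, Italy: trip 1.csv"))
# -> "Rome,_Italy__trip_1_csv"
# ":" became "_" first, then " " became "_" after the "__" collapse had already run.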
def generate_map_from_sources(job, data_sources, metadata_sources, debug=False):
try:
print("starting generate_map_from_sources w job")
key = job['key']
max_seconds = int(job.get('max_seconds', 10))
countries = job.get('countries', [])
admin1limits = job.get('admin1limits', [])
order_id = job['order_id']
extra_context = job.get("extra_context", {})
end_user_timezone = extra_context.get("end_user_timezone", None)
case_insensitive = extra_context.get("case_insensitive", None)
# make directory to store input sources and final maps
directory = join(MAPS_DIRECTORY, key)
mkdir(directory)
print("made directory:" + directory)
max_source_text_length = next(f for f in Source._meta.fields if f.name == "source_text").max_length
locations = []
for source in data_sources:
try:
print("source:", source)
source_type = source['type']
source_data = source['data']
if source_type == "text":
print("source_data:", source_data.encode("utf-8"))
print("[generator] creating source object")
source_text = source_data.encode("utf-8") if len(source_data.encode("utf-8")) < max_source_text_length else None
Source.objects.create(order_id=order_id, source_text=source_text, source_type="text")
print("[generator] created source object")
#save_text_to_file(source_data, toFileName(source_data, max_length=20))
locations.extend(extract_locations_from_text(source_data, case_insensitive=case_insensitive))
elif isinstance(source_data, str) and validators.url(source_data):
print("source is url")
url = source_data.strip().strip('"').strip("'")
# we want to respect Google, so we avoid adding an automated click through
# by just directly getting the url
if url.startswith("https://www.google.com/url?"):
url = unquote(search("(?<=&url=)[^&]{10,}", url).group(0))
if not url.startswith("http"):
print("we assume that the user didn't include the protocol")
url = "http://" + url
Source.objects.create(order_id=order_id, source_url=url, source_type="url")
extension = url.split(".")[-1]
print("extension:", extension) | FirstDraftGIS/firstdraft | [
10,
1,
10,
14,
1441325314
] |
def named_set(self, elem):
self.named_del(elem.name)
self.append(elem)
# repo: hicknhack-software/buildbot-inplace-config
def named_get(self, name):
for elem in self:
if elem.name == name:
return elem
# repo: hicknhack-software/buildbot-inplace-config
def names(self):
return map(lambda elem: elem.name, self)
# repo: hicknhack-software/buildbot-inplace-config
def __init__(self, **kwargs):
super(Wrapper, self).__init__(**kwargs)
self._inplace_workers = NamedList()
self._projects = NamedList()
# repo: hicknhack-software/buildbot-inplace-config
def builders(self):
return self.named_list('builders')
# repo: hicknhack-software/buildbot-inplace-config
def schedulers(self):
return self.named_list('schedulers')
# repo: hicknhack-software/buildbot-inplace-config
def change_source(self):
return self.named_list('change_source')
# repo: hicknhack-software/buildbot-inplace-config
def workers(self):
return self.named_list('workers')
# repo: hicknhack-software/buildbot-inplace-config
def inplace_workers(self):
return self._inplace_workers
# repo: hicknhack-software/buildbot-inplace-config
def projects(self):
return self._projects
# repo: hicknhack-software/buildbot-inplace-config
def load_workers(self, path):
Worker.load(path, self.inplace_workers, self.workers)
# repo: hicknhack-software/buildbot-inplace-config
def setup_inplace(self):
self.builders.clear()
self.schedulers.clear()
builder_name = self.DUMMY_NAME
trigger_name = self.DUMMY_TRIGGER
worker_names = self.inplace_workers.names
self.builders.named_set(BuilderConfig(name=builder_name, workernames=worker_names, factory=BuildFactory()))
self.schedulers.named_set(ForceScheduler(name=trigger_name, builderNames=[builder_name]))
for project in self.projects:
builder_name = "%s_Builder" % project.name
trigger_name = "Force_%s_Build" % project.name
builder_factory = InplaceBuildFactory(self, project)
self.builders.named_set(BuilderConfig(name=builder_name, workernames=worker_names, factory=builder_factory))
self.schedulers.named_set(ForceScheduler(name=trigger_name, builderNames=[builder_name]))
# repo: hicknhack-software/buildbot-inplace-config
def setup_project_inplace(self, project):
self.setup_inplace()
for worker in self.inplace_workers:
log.msg("Got worker '%s' for platform %s and setups %s" %
(worker.name, pformat(worker.platforms), pformat(worker.setups)),
system='Inplace Config')
for profile in project.inplace.profiles:
worker_names = self.project_profile_worker_names(profile)
if not worker_names:
log.msg("Failed to find worker for platform '%s' and setups '%s' (project '%s')" %
(profile.platform, pformat(profile.setups), project.name),
system='Inplace Config')
continue # profile not executable
builder_name = "_".join([project.name, profile.platform, profile.name])
trigger_name = _project_profile_trigger_name(project.name, profile)
build_factory = SetupBuildFactory(self, project, profile)
self.builders.named_set(BuilderConfig(name=builder_name, workernames=worker_names, factory=build_factory))
self.schedulers.named_set(Triggerable(name=trigger_name, builderNames=[builder_name]))
# repo: hicknhack-software/buildbot-inplace-config
def test_assert_output(self):
def f():
print('abc')
print('123')
sys.stderr.writelines(['def\n', '456\n'])
self.assertOutput('abc\n123\n', 'def\n456\n', f)
# repo: mogproject/mog-commons-python
def f():
print('abc')
print('123')
sys.stderr.writelines(['def\n', '456\n'])
# repo: mogproject/mog-commons-python
def test_assert_system_exit(self):
self.assertSystemExit(123, lambda: sys.exit(123))
self.assertSystemExit(234, lambda x: sys.exit(x), 234)
# repo: mogproject/mog-commons-python
def test_with_bytes_output_types(self):
# accepts unicode
def f(data, expected):
with self.withBytesOutput() as (out, err):
for d in data:
out.write(d)
self.assertEqual(out.getvalue(), expected)
f(['あいうえお'], 'あいうえお'.encode('utf-8'))
f([b'\xff', 'あいうえお'], b'\xff' + 'あいうえお'.encode('utf-8'))
# accepts only string-like types
self.assertRaises(TypeError, f, [[]])
self.assertRaises(TypeError, f, [{'a': 20}])
self.assertRaises(TypeError, f, [1.23])
# repo: mogproject/mog-commons-python
def f(text):
with self.withAssertOutputFile(os.path.join('tests', 'resources', 'utf8_ja.txt')) as out:
out.write(text.encode('utf-8'))
# repo: mogproject/mog-commons-python
def test_assert_raises_message(self):
class MyException(Exception):
pass
def f(msg):
raise MyException(msg)
self.assertRaisesMessage(MyException, 'あいうえお', f, 'あいうえお')
self.assertRaisesMessage(AssertionError, 'MyException not raised',
self.assertRaisesMessage, MyException, 'あいうえお', lambda: None)
if six.PY2:
expected = ("u'\\u3042\\u3044\\u3046\\u3048' != u'\\u3042\\u3044\\u3046\\u3048\\u304a'\n" +
"- \u3042\u3044\u3046\u3048\n+ \u3042\u3044\u3046\u3048\u304a\n? +\n")
else:
expected = "'γγγγ' != 'γγγγγ'\n- γγγγ\n+ γγγγγ\n? +\n"
self.assertRaisesMessage(AssertionError, expected,
self.assertRaisesMessage, MyException, 'あいうえお', f, 'あいうえ')
# repo: mogproject/mog-commons-python
def onDeviceEvent(self, Event, Gizmo = None):
"""
Called from Base Class' onEvent method.
See GizmodDispatcher.onEvent documentation for an explanation of this function
"""
# if the event isn't from the remote we're interested in don't handle it
if Event.Remote not in USES_LIRC_REMOTES:
return False
# process the key
if Event.Button == "Power":
# if mythfrontend is open, kill it
subprocess.Popen(["killall", "mythfrontend"])
return True
elif Event.Button == "TV":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_A)
return True
elif Event.Button == "Music":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_B)
return True
elif Event.Button == "Pictures":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_SLASH)
return True
elif Event.Button == "Videos":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_SLASH, [GizmoKey.KEY_RIGHTSHIFT])
return True
elif Event.Button == "Stop":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_S)
return True
elif Event.Button == "Record":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_R)
return True
elif Event.Button == "Pause":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_P)
return True
elif Event.Button == "Rewind":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_COMMA, [GizmoKey.KEY_RIGHTSHIFT])
return True
elif Event.Button == "Play":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_P)
return True
elif Event.Button == "Forward":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_DOT, [GizmoKey.KEY_RIGHTSHIFT])
return True
elif Event.Button == "Replay":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_PAGEUP)
return True
elif Event.Button == "Back":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_ESC)
return True
elif Event.Button == "Up":
return False
elif Event.Button == "Skip":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_PAGEDOWN)
return True
elif Event.Button == "More":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_M)
return True
elif Event.Button == "Left":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_LEFT)
return True
elif Event.Button == "OK":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_ENTER)
return True
elif Event.Button == "Right":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_RIGHT)
return True
elif Event.Button == "Down":
return False
elif Event.Button == "VolUp":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_RIGHTBRACE)
return True
elif Event.Button == "VolDown":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_LEFTBRACE)
return True
elif Event.Button == "Home":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_END)
return True
elif Event.Button == "ChanUp":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_UP)
return True
elif Event.Button == "ChanDown":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_DOWN)
return True
elif Event.Button == "RecTV":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_HOME)
return True
elif Event.Button == "Mute":
return False
elif Event.Button == "DVD":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_H)
return True
elif Event.Button == "Guide":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_S)
return True
elif Event.Button == "LiveTV":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_N)
return True
elif Event.Button == "One":
return False
elif Event.Button == "Two":
return False
elif Event.Button == "Three":
return False
elif Event.Button == "Four":
return False
elif Event.Button == "Five":
return False
elif Event.Button == "Six":
return False
elif Event.Button == "Seven":
return False
elif Event.Button == "Eight":
return False
elif Event.Button == "Nine":
return False
elif Event.Button == "Star":
return False
elif Event.Button == "Zero":
return False
elif Event.Button == "Hash":
return False
elif Event.Button == "Clear":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_C)
return True
elif Event.Button == "Enter":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_I)
return True
else:
# unmatched event, keep processing
return False
# repo: jtriley/gizmod
def onEvent(self, Event, Gizmo = None):
"""
Overloading Base Class' onEvent method!
Make sure to call it!
""" | jtriley/gizmod | [
11,
4,
11,
2,
1319048459
] |
def __init__(self):
"""
Default Constructor
""" | jtriley/gizmod | [
11,
4,
11,
2,
1319048459
] |
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument(
'-c', '--config',
help="magma web config file (default: production.ini)",
default="production.ini", type=str)
sp = self.parser.add_subparsers()
sc_add = sp.add_parser(
"add", help=self.add.__doc__, description=self.add.__doc__)
sc_add.add_argument(
'user', help="user id (default: %(default)s)",
default=None, type=str)
sc_add.add_argument(
'name', help="name (default: %(default)s)",
default=None, type=str)
sc_add.add_argument(
'email', help="e-mail address (default: %(default)s)",
default=None, type=str)
sc_add.add_argument(
'password', help="password (default: %(default)s)",
default=None, type=str)
sc_add.set_defaults(func=self.add)
sc_update = sp.add_parser(
"update", help=self.update.__doc__,
description=self.update.__doc__)
sc_update.add_argument(
'-u', '--user', help="change user id)",
default=argparse.SUPPRESS, type=str)
sc_update.add_argument(
'-n', '--name', help="change display name",
default=argparse.SUPPRESS, type=str)
sc_update.add_argument(
'-e', '--email', help="change email",
default=argparse.SUPPRESS, type=str)
sc_update.add_argument(
'-p', '--password', help="change password",
default=argparse.SUPPRESS, type=str)
sc_update.add_argument(
'userid', help="user id (default: %(default)s)",
default=None, type=str)
sc_update.set_defaults(func=self.update)
sc_remove = sp.add_parser(
"remove", help=self.remove.__doc__,
description=self.remove.__doc__)
sc_remove.add_argument(
'user', help="user id (default: %(default)s)",
default=None, type=str)
sc_remove.set_defaults(func=self.remove)
sc_owner = sp.add_parser(
"owner", help=self.owner.__doc__, description=self.owner.__doc__)
sc_owner.add_argument(
'job', help="job identifier", default=None, type=str)
sc_owner.add_argument(
'user', help="user id", default=None, type=str)
sc_owner.set_defaults(func=self.owner)
sc_import = sp.add_parser(
"importjob", help=self.importjob.__doc__,
description=self.importjob.__doc__)
sc_import.add_argument(
'dbfile', help="job sqlite result db file",
default=None, type=argparse.FileType('r'))
sc_import.add_argument(
'owner', help="user id", default=None, type=str)
sc_import.set_defaults(func=self.importjob)
# repo: NLeSC/MAGMa
def update(self, args):
"Update user data"
user = User.by_id(args.userid)
if 'user' in args:
user.userid = args.user
for job in user.jobs:
job.owner = args.user
JobMeta.add(job)
if 'name' in args:
user.displayname = args.name
if 'email' in args:
user.email = args.email
if 'password' in args:
user.password = args.password
User.add(user)
commit()
# repo: NLeSC/MAGMa
def owner(self, args):
"""Alter owner of job"""
job = self.job_factory.fromId(args.job)
job.meta.owner = args.user
JobMeta.add(job.meta)
commit()
# repo: NLeSC/MAGMa
def configure(self, config_file):
config_url = 'config:' + config_file
cwd = os.getcwd()
self.config = appconfig(config_url, 'MAGMaWeb', relative_to=cwd)
engine = engine_from_config(self.config)
init_user_db(engine)
self.job_factory = make_job_factory(self.config)
# repo: NLeSC/MAGMa
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False):
super(AsymmetricLossMultiLabel, self).__init__()
# repo: rwightman/pytorch-image-models (snippet truncated in source)
def forward(self, x, y):
""""
Parameters
----------
x: input logits
y: targets (multi-label binarized vector)
""" | rwightman/pytorch-image-models | [
23978,
3956,
23978,
96,
1549086672
] |
def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'):
super(AsymmetricLossSingleLabel, self).__init__()
# repo: rwightman/pytorch-image-models (snippet truncated in source)
def forward(self, inputs, target, reduction=None):
""""
Parameters
----------
x: input logits
y: targets (1-hot vector)
""" | rwightman/pytorch-image-models | [
23978,
3956,
23978,
96,
1549086672
] |
def test_split(self):
assert split_command_line('foo') == ['foo']
assert split_command_line(['foo']) == ['foo']
assert split_command_line('foo bar') == ['foo', 'bar']
assert split_command_line(['foo bar']) == ['foo', 'bar']
# repo: mbedmicro/pyOCD
def test_split_whitespace(self):
assert split_command_line('a b') == ['a', 'b']
assert split_command_line('a\tb') == ['a', 'b']
assert split_command_line('a\rb') == ['a', 'b']
assert split_command_line('a\nb') == ['a', 'b']
assert split_command_line('a \tb') == ['a', 'b']
# repo: mbedmicro/pyOCD