function
stringlengths 11
56k
| repo_name
stringlengths 5
60
| features
sequence |
---|---|---|
def get_process_types(self, displayname=None, add_info=False):
    """Return the process types with the given display name (all if None)."""
    search_params = self._get_params(displayname=displayname)
    return self._get_instances(Processtype, add_info=add_info,
                               params=search_params)
25,
39,
25,
9,
1346852014
] |
def get_protocols(self, name=None, add_info=False):
    """Return the protocols configured on the system, optionally filtered by name."""
    search_params = self._get_params(name=name)
    return self._get_instances(Protocol, add_info=add_info,
                               params=search_params)
25,
39,
25,
9,
1346852014
] |
def get_reagent_lots(self, name=None, kitname=None, number=None,
                     start_index=None, add_info=False):
    """Get a list of reagent lots, filtered by keyword arguments.

    name: reagent kit name, or list of names.
    kitname: name of the kit this lots belong to
    number: lot number or list of lot number
    start_index: Page to retrieve; all if None.
    add_info: if True, also return a list of dicts with the extra
        information on each result node (added for consistency with the
        other get_* methods; defaults to False, preserving the old
        return value).
    """
    params = self._get_params(name=name, kitname=kitname, number=number,
                              start_index=start_index)
    return self._get_instances(ReagentLot, add_info=add_info, params=params)
25,
39,
25,
9,
1346852014
] |
def _get_params(self, **kwargs):
"Convert keyword arguments to a kwargs dictionary."
result = dict()
for key, value in kwargs.items():
if value is None: continue
result[key.replace('_', '-')] = value
return result | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def _get_instances(self, klass, add_info=None, params=dict()):
results = []
additionnal_info_dicts = []
tag = klass._TAG
if tag is None:
tag = klass.__name__.lower()
root = self.get(self.get_uri(klass._URI), params=params)
while params.get('start-index') is None: # Loop over all pages.
for node in root.findall(tag):
results.append(klass(self, uri=node.attrib['uri']))
info_dict = {}
for attrib_key in node.attrib:
info_dict[attrib_key] = node.attrib['uri']
for subnode in node:
info_dict[subnode.tag] = subnode.text
additionnal_info_dicts.append(info_dict)
node = root.find('next-page')
if node is None: break
root = self.get(node.attrib['uri'], params=params)
if add_info:
return results, additionnal_info_dicts
else:
return results | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def put_batch(self, instances):
    """Update multiple instances using a single batch request.

    All instances are assumed to share one class; the batch endpoint
    and XML namespace are taken from the first instance.
    """
    if not instances:
        return
    root = None  # XML root element for batch request
    for instance in instances:
        if root is None:
            klass = instance.__class__
            # Tag is art:details, con:details, etc.
            example_root = instance.root
            # Reuse the first instance's XML namespace for the
            # enclosing <details> element.
            ns_uri = re.match("{(.*)}.*", example_root.tag).group(1)
            root = ElementTree.Element("{%s}details" % (ns_uri))
        root.append(instance.root)
    uri = self.get_uri(klass._URI, 'batch/update')
    data = self.tostring(ElementTree.ElementTree(root))
    root = self.post(uri, data)
25,
39,
25,
9,
1346852014
] |
def tostring(self, etree):
    """Serialize the given ElementTree to a UTF-8 encoded XML byte string."""
    buffer = BytesIO()
    self.write(buffer, etree)
    return buffer.getvalue()
25,
39,
25,
9,
1346852014
] |
def date_for_month(month, day, hour, minute):
    """Build a US/Pacific-localized datetime in the module-level YEAR."""
    timez = pytz.timezone('US/Pacific')
    return timez.localize(datetime(YEAR, month, day, hour, minute))
9,
2,
9,
2,
1365205105
] |
def june(day, hour, minute):
    """Shorthand for date_for_month with the month fixed to June."""
    return date_for_month(6, day, hour, minute)
9,
2,
9,
2,
1365205105
] |
def test_game_date():
    """The module-level game fixture formats its date as 'Month D'."""
    assert_equals(game.pretty_date, 'April 1')
9,
2,
9,
2,
1365205105
] |
def test_game_description():
    """An away game's description is prefixed with 'at'."""
    assert_equals(game.description, 'at LA Dodgers')
9,
2,
9,
2,
1365205105
] |
def test_all_teams(_get):
    """All 30 MLB teams are parsed from the fixture page."""
    _get().content = open('tests/fixtures/teams.html').read()
    teams = baseball.teams()
    assert_equals(len(teams), 30)
9,
2,
9,
2,
1365205105
] |
def test_first_teams(_get):
    """The first parsed team carries name, league, division and a
    schedule link."""
    _get().content = open('tests/fixtures/teams.html').read()
    team = baseball.teams()[0]
    assert_equals(team['name'], 'Baltimore Orioles')
    assert_equals(team['league'], 'AMERICAN')
    assert_equals(team['division'], 'EAST')
    assert_equals(team['links']['schedule'],
                  'http://espn.go.com/mlb/teams/schedule?team=bal')
9,
2,
9,
2,
1365205105
] |
def test_results(_get):
    """Past games from the schedule fixture are parsed into Result
    tuples with opponent, time, home flag, win flag and score."""
    _get().content = open('tests/fixtures/schedule.html').read()
    results, _ = baseball.schedule('WEST', 'http://example.com')
    assert_equals(results, [
        baseball.Result('LA Dodgers', april(1, 13, 5), False, False, '4-0'),
        baseball.Result('LA Dodgers', april(2, 13, 5), False, True, '3-0'),
        baseball.Result('LA Dodgers', april(3, 13, 5), False, True, '5-3'),
        baseball.Result('St. Louis', april(5, 13, 5), True, True, '1-0'),
    ])
9,
2,
9,
2,
1365205105
] |
def test_no_next_game(_get):
    """A currently-in-progress game is still reported as the next game.

    NOTE(review): despite the name, this asserts a concrete game id --
    confirm the intended semantics against the fixture.
    """
    _get().content = open('tests/fixtures/schedule_current_game.html').read()
    game_time, game_id = baseball.next_game('http://example.com')
    assert_equals(game_id, '330406126')
9,
2,
9,
2,
1365205105
] |
def test_next_game_against_bluejays(_get):
    """A double-header fixture still yields a usable next game id/time."""
    _get().content = \
        open('tests/fixtures/bluejays_with_double_header.html').read()
    game_time, game_id = baseball.next_game('http://example.com')
    assert game_time is not None
    assert_equals('330604126', game_id)
9,
2,
9,
2,
1365205105
] |
def test_next_game(_get):
    """The next game's id and localized start time are extracted."""
    _get().content = open('tests/fixtures/schedule.html').read()
    game_time, game_id = baseball.next_game('http://example.com')
    assert_equals(game_id, '330406126')
    assert_equals(game_time, april(6, 13, 5))
9,
2,
9,
2,
1365205105
] |
def test_upcoming(_get):
    """Upcoming games are parsed with a None win flag and 0-0 score."""
    _get().content = open('tests/fixtures/schedule.html').read()
    _, upcoming = baseball.schedule('WEST', 'http://example.com')
    assert_equals(upcoming, [
        baseball.Result('St. Louis', april(6, 13, 5), True, None, '0-0'),
        baseball.Result('St. Louis', april(7, 13, 5), True, None, '0-0'),
        baseball.Result('Colorado', april(8, 19, 15), True, None, '0-0'),
        baseball.Result('Colorado', april(9, 19, 15), True, None, '0-0'),
        baseball.Result('Colorado', april(10, 12, 45), True, None, '0-0'),
    ])
9,
2,
9,
2,
1365205105
] |
def test_upcoming_with_skipped(_get):
    """Double-header games already played are skipped in the upcoming
    list.  (Removed a leftover debug print of the first opponent.)"""
    webpage = open('tests/fixtures/bluejays_with_double_header.html').read()
    _get().content = webpage
    _, upcoming = baseball.schedule('WEST', 'http://example.com')
    assert_equals(upcoming, [
        baseball.Result('Toronto', june(4, 19, 15), True, None, '0-0'),
        baseball.Result('Toronto', june(5, 12, 45), True, None, '0-0'),
        baseball.Result('Arizona', june(7, 18, 40), False, None, '0-0'),
        baseball.Result('Arizona', june(8, 19, 10), False, None, '0-0'),
        baseball.Result('Arizona', june(9, 13, 10), False, None, '0-0'),
    ])
9,
2,
9,
2,
1365205105
] |
def test_standings(_get):
    """NL West standings are parsed into Standing tuples in order."""
    _get().content = open('tests/fixtures/standings.html').read()
    standings = baseball.current_standings('NATIONAL', 'WEST')
    examples = [
        baseball.Standing('San Francisco', 'SF', 3, 1, .75, 0.0, 'Won 3'),
        baseball.Standing('Colorado', 'COL', 3, 1, .75, 0.0, 'Won 3'),
        baseball.Standing('Arizona', 'ARI', 2, 1, .667, 0.5, 'Won 1'),
        baseball.Standing('LA Dodgers', 'LAD', 1, 2, .333, 1.5, 'Lost 2'),
        baseball.Standing('San Diego', 'SD', 1, 3, .250, 2.0, 'Lost 1'),
    ]
    assert_equals(standings, examples)
9,
2,
9,
2,
1365205105
] |
def test_parse_gametime_postponed():
    """A POSTPONED time string falls back to a fixed UTC time.

    NOTE(review): 20:05 UTC appears to be a hard-coded default inside
    parse_gametime -- confirm.
    """
    gt = baseball.parse_gametime("Mon, Apr 1", "POSTPONED")
    assert_equals(pytz.utc.localize(datetime(YEAR, 4, 1, 20, 5)), gt)
9,
2,
9,
2,
1365205105
] |
def test_no_team_info():
    """An unknown team name makes team_info raise."""
    with assert_raises(Exception):
        baseball.team_info('Giantssjk')
9,
2,
9,
2,
1365205105
] |
def test_normalize():
    """normalize uppercases and strips spaces and hyphens."""
    assert_equals(baseball.normalize('Giants'), 'GIANTS')
    assert_equals(baseball.normalize('Francisco Giants'), 'FRANCISCOGIANTS')
    assert_equals(baseball.normalize('Red-Sox'), 'REDSOX')
9,
2,
9,
2,
1365205105
] |
def test_preview_gametime():
    """The game start time is parsed (as UTC) from a preview page."""
    soup = BeautifulSoup(open('tests/fixtures/preview_during.html'))
    assert_equals(baseball.parse_game_time(soup),
                  datetime(2013, 4, 13, 17, 5, tzinfo=timezone.utc))
9,
2,
9,
2,
1365205105
] |
def test_preview_pitcher():
    """Starting pitcher name, ERA and record are parsed from a preview."""
    soup = BeautifulSoup(open('tests/fixtures/preview_during.html'))
    pitcher = baseball.parse_starting_pitcher(soup, 0)
    assert_equals(pitcher.name, "Bumgarner")
    assert_equals(pitcher.era, 0.96)
    assert_equals(pitcher.record, '2-0')
9,
2,
9,
2,
1365205105
] |
def __init__(self, client, config, serializer, deserializer) -> None:
    """Store the pipeline client, service configuration and
    (de)serializers used by this operations class."""
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
3526,
2256,
3526,
986,
1335285972
] |
def get_long_running_output(pipeline_response):
    # LRO completion callback: invoke the caller-supplied continuation
    # `cls` (from the enclosing scope) with the raw response and no
    # deserialized body; returns None when no continuation was given.
    if cls:
        return cls(pipeline_response, None, {})
3526,
2256,
3526,
986,
1335285972
] |
def get_long_running_output(pipeline_response):
    # LRO completion callback: deserialize the response body into a
    # ServiceEndpointPolicyDefinition model; when a continuation `cls`
    # was supplied (enclosing scope), pass it the raw response and the
    # model instead of returning the model directly.
    deserialized = self._deserialize('ServiceEndpointPolicyDefinition', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
3526,
2256,
3526,
986,
1335285972
] |
def list_by_resource_group(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def prepare_request(next_link=None):
    """Build the GET request for one page of results.

    The first page is built from the operation's URL template plus the
    api-version query parameter; later pages use the service-provided
    next_link verbatim (no extra query parameters).
    """
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    if not next_link:
        # Construct URL
        url = self.list_by_resource_group.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
    else:
        url = next_link
        query_parameters = {}  # type: Dict[str, Any]
        request = self._client.get(url, query_parameters, header_parameters)
    return request
3526,
2256,
3526,
986,
1335285972
] |
def numpy_ndarray():
    """Resolve numpy's ndarray type lazily (numpy is imported on first call)."""
    from numpy import ndarray
    return ndarray
28,
8,
28,
14,
1440384663
] |
def _update_proxy(self, change):
    """An observer which sends state change to the proxy.

    change -- the Atom change dict describing the updated member.
    """
    # The superclass handler implementation is sufficient.
    super(PlotItem, self)._update_proxy(change)
28,
8,
28,
14,
1440384663
] |
def _update_range(self, change):
"""Handle updates and changes"""
getattr(self.proxy, "set_%s" % change["name"])(change["value"]) | frmdstryr/enamlx | [
28,
8,
28,
14,
1440384663
] |
def _update_proxy(self, change):
    """An observer which sends state change to the proxy.

    change -- the Atom change dict describing the updated member.
    """
    # The superclass handler implementation is sufficient.
    super(PlotItem2D, self)._update_proxy(change)
28,
8,
28,
14,
1440384663
] |
def _update_proxy(self, change):
    """An observer which sends state change to the proxy.

    change -- the Atom change dict describing the updated member.
    """
    # The superclass handler implementation is sufficient.
    super(PlotItem3D, self)._update_proxy(change)
28,
8,
28,
14,
1440384663
] |
def _update_proxy(self, change):
    """An observer which sends state change to the proxy.

    change -- the Atom change dict describing the updated member.
    """
    # The superclass handler implementation is sufficient.
    super(AbstractDataPlotItem, self)._update_proxy(change)
28,
8,
28,
14,
1440384663
] |
def __init__(self, parent):
    """Build the two-row Tk UI: a top row with the target path entry,
    version entry and OK button, and an (empty here) bottom row.

    parent -- the Tk root/container widget.
    """
    self.myParent = parent
    self.topContainer = Frame(parent)
    self.topContainer.pack(side=TOP, expand=1, fill=X, anchor=NW)
    self.btmContainer = Frame(parent)
    self.btmContainer.pack(side=BOTTOM, expand=1, fill=X, anchor=NW)
    # Backing variables for the two entry fields.
    path = StringVar()
    ver = StringVar()
    # Defaults: NordInvasion folder next to the script, version 0.4.9.
    path.set(myloc + "\\NordInvasion")
    ver.set("0.4.9")
    entry1 = Entry(self.topContainer, textvariable=path)
    entry1.pack(side=LEFT, expand=1, fill=X)
    entry2 = Entry(self.topContainer, textvariable=ver, width =7)
    entry2.pack(side=LEFT, expand=0)
    #------------------ BUTTON #1 ------------------------------------
    button_name = "OK"
    # command binding: pass current entry values to buttonPress on click
    var = StringVar()
    self.button1 = Button(self.topContainer, command=lambda: self.buttonPress(entry1.get(),var,entry2.get()))
1,
2,
1,
1,
1421020772
] |
def writeOut(self,dir,hashfile,toplevel,var,folder):
    """ walks a directory, and executes a callback on each file

    For each subdirectory an "F::<relpath>" marker and an "X::" line are
    written to hashfile and the directory is mirrored under `folder`;
    for each file its path (relative to `toplevel`) and SHA-1 digest are
    written, and a gzipped copy is stored under `folder`.  `var` is a
    Tk StringVar used for progress display; root.update() keeps the UI
    responsive.  Windows-only: paths are joined with '\\'.
    """
    dir = os.path.abspath(dir)
    for file in [file for file in os.listdir(dir) if not file in [".",".."]]:
        nfile = os.path.join(dir,file)
        if os.path.isdir(nfile): # is a directory
            hashfile.write("F::"+nfile.replace(toplevel,"") + "\n")
            # "X::" presumably marks "no hash" for directory entries -- verify.
            hashfile.write("X::\n")
            var.set("Generating... " + "F::"+nfile.replace(toplevel,""))
            root.update()
            if not os.path.exists(folder + '\\' + nfile.replace(toplevel,"")):
                os.mkdir(folder + '\\' + nfile.replace(toplevel,""))
            self.writeOut(nfile,hashfile,toplevel,var,folder)
        else: # is a file
            # Generate the hash and add to hash file
            h=(hashlib.sha1(open(nfile, 'rb').read()).hexdigest())
            hashfile.write(nfile.replace(toplevel,"") + "\n")
            var.set("Generating... " + nfile.replace(toplevel,""))
            root.update()
            hashfile.write(h + "\n")
            # Generate a smaller, gzipped version of the file
            with open(nfile, 'rb') as f_in:
                with gzip.open(folder + '\\' + nfile.replace(toplevel,"") + '.gz', 'wb') as f_out:
                    f_out.writelines(f_in)
1,
2,
1,
1,
1421020772
] |
def buttonHandler_a(self, path, var):
    """Adapter callback forwarding to buttonPress.

    NOTE(review): buttonPress is called elsewhere with three arguments
    (path, var, version) -- confirm this two-argument call is valid.
    """
    self.buttonPress(path, var)
1,
2,
1,
1,
1421020772
] |
def test_latest(self):
    """The first build yielded is the repository's last build."""
    _travis = _get_travispy()
    repo = get_travis_repo(_travis, 'travispy/on_pypy')
    builds = get_historical_builds(_travis, repo)
    build = next(builds)
    assert build.repository_id == 2598880
    assert build.id == repo.last_build_id
assert build.id == repo.last_build_id | jayvdb/travis_log_fetch | [
1,
3,
1,
6,
1445831087
] |
def test_all_small(self):
    """All builds of a small repository are yielded, newest first."""
    _travis = _get_travispy()
    repo = get_travis_repo(_travis, 'travispy/on_pypy')
    builds = get_historical_builds(_travis, repo)
    ids = []
    for build in builds:
        assert build.repository_id == 2598880
        ids.append(build.id)
    assert ids == [53686685, 37521698, 28881355]
1,
3,
1,
6,
1445831087
] |
def test_multiple_batches_bootstrap(self):
    """Test using a repository that has lots of builds, esp. PRs.

    Verifies that build numbers decrease monotonically except for
    known Travis-side duplicate numbers, and that no build id is
    yielded twice across page boundaries.
    """
    _travis = _get_travispy()
    repo = get_travis_repo(_travis, 'twbs/bootstrap')
    builds = get_historical_builds(_travis, repo,
                                   _after=12071,
                                   _load_jobs=False)
    ids = []
    prev_number = None
    for build in builds:
        assert build.repository_id == 12962
        if int(build.number) in [12069, 12062, 12061, 12054, 12049,
                                 12048, 12041, 12038, 12037, 12033]:
            # Many duplicates
            # See https://github.com/travis-ci/travis-ci/issues/2582
            print('duplicate build number {0}: {1}'.format(
                build.number, build.id))
            if build.id in [53437234, 53350534, 53350026,
                            53263731, 53263730,  # two extra 12054
                            53180440, 53179846, 53062896, 53019568,
                            53004896, 52960766]:
                assert prev_number == int(build.number)
            else:
                assert prev_number == int(build.number) + 1
        elif prev_number:
            # All other build numbers decrease rather orderly
            assert prev_number == int(build.number) + 1
        prev_number = int(build.number)
        if ids:
            assert build.id < ids[-1]
        ids.append(build.id)
        # There are many more duplicates, so we stop here.
        if int(build.number) == 12033:
            break
    assert len(ids) == len(set(ids))
1,
3,
1,
6,
1445831087
] |
def test_logical_multiple_job_build(self):
    """An extended slug 'owner/repo#build.job' resolves to the job."""
    target = Target.from_extended_slug('menegazzo/travispy#101.3')
    _travis = _get_travispy()
    job = get_historical_job(_travis, target)
    assert job.repository_id == 2419489
    assert job.number == '101.3'
    assert job.id == 82131391
1,
3,
1,
6,
1445831087
] |
def expand_matcher_configs(ctx, param, matcher_configs):
    """Expand the given matcher configuration files.

    Directories are searched recursively for YAML (*.yml) files;
    plain file paths are passed through unchanged.
    """
    expanded = []
    for location in (Path(f) for f in matcher_configs):
        if location.is_dir():
            expanded.extend(location.glob("**/*.yml"))
        else:
            expanded.append(location)
    return expanded
176,
45,
176,
16,
1433181573
] |
def cli(**kwargs):
    """radish - The root from red to green. BDD tooling for Python.
    radish-test can be used to perform tests for the Steps
    implemented in a radish base directory.
    Use the `MATCHER_CONFIGS` to pass configuration
    files containing the Step matchers.
    """
    config = Config(kwargs)
    # turn off ANSI colors if requested
    if config.no_ansi:
        cf.disable()
    logger.debug("Basedirs: %s", ", ".join(str(d) for d in config.basedirs))
    logger.debug("Loading all modules from the basedirs")
    loaded_modules = loader.load_modules(config.basedirs)
    logger.debug(
        "Loaded %d modules from the basedirs: %s",
        len(loaded_modules),
        ", ".join(str(m) for m in loaded_modules),
    )
    logger.debug(
        "Matcher configs: %s", ", ".join(str(m) for m in config.matcher_configs)
    )
    # Run the matcher tests against the globally registered steps.
    coverage_config = CoverageConfig(config.show_missing, config.show_missing_templates)
    run_matcher_tests(config.matcher_configs, coverage_config, step_registry)
176,
45,
176,
16,
1433181573
] |
def __init__(self, remote, path, repo_type):
    """Initialize the sync wrapper around a local GeoGig repository.

    remote -- remote repository location
    path -- local working-copy directory
    repo_type -- repository type tag stored as-is
    """
    self.repo_type = repo_type
    self.remote = remote
    self.path = path
    # SQLite database bundled with the working copy.
    self.sql_database = os.path.join(self.path, 'database.sqlite')
    self.local_repo = self.connect2repo()
    # NOTE(review): this is the path of this *module* file, not the
    # repository root -- confirm this is intended.
    self.root_path = os.path.normpath(__file__)
1,
1,
1,
1,
1427025939
] |
def export_to_shapefiles(self):
for t in self.local_repo.trees:
if t.path not in ("layer_statistics", "views_layer_statistics", "virts_layer_statistics"):
self.local_repo.exportshp('HEAD', t.path, os.path.join('HEAD', t.path,
os.path.join(self.path, t.path) + '.shp'))
# layer = qgis.utils.iface.addVectorLayer(os.path.join(self.path, t.path) + '.shp', t.path, "ogr")
vl = QgsVectorLayer("Point", "temporary_points", "memory")
print layer.geometryType()
pr = vl.dataProvider()
layer = qgis.utils.iface.addVectorLayer(os.path.join(self.path, t.path) + '.shp', t.path, "ogr")
# layers = QgsMapLayerRegistry.instance().mapLayers()
# for name, layer in layers.iteritems():
# print 'name: ' + str(name), 'layer type: ' + str(layer.geometryType())
my_dir = self.path
print 'deleting %s' % my_dir
for fname in os.listdir(my_dir):
if fname.startswith(t.path):
os.remove(os.path.join(my_dir, fname)) | roscoeZA/GeoGigSync | [
1,
1,
1,
1,
1427025939
] |
def add_commit_push(self, name, email, message):
    """Configure the author identity, import shapefiles, then add and
    commit everything with a timestamped message.

    Failures are printed and swallowed (best effort).
    """
    # Timestamp the commit message to keep messages unique.
    message += " " + str(datetime.now())
    self.local_repo.config(geogig.USER_NAME, name)
    self.local_repo.config(geogig.USER_EMAIL, email)
    try:
        self.import_all_shapefiles()
    except GeoGigException, e:
        # NOTE(review): message mentions import_from_spatialite() but
        # the call above is import_all_shapefiles() -- stale text?
        print 'Error with import_from_spatialite()'
    try:
        self.local_repo.addandcommit(message)
        print 'Repo added and committed.'
    except GeoGigException, e:
        print e
1427025939
] |
def get_long_description():
    """Read and return the package README (reStructuredText) as text."""
    readme_path = os.path.join(os.path.dirname(__file__), "README.rst")
    with open(readme_path, encoding='utf-8') as fp:
        return fp.read()
427,
56,
427,
15,
1419003867
] |
def verbose(*args):
    """Print *args* to stdout (thin logging/debug shim)."""
    print(*args)
3,
3,
3,
1,
1443224426
] |
def __init__(self, name):
    """ Initialize a bridge object.

    name -- the bridge device name; no system calls are made here,
    the object just wraps an (assumed existing) bridge.
    """
    self.name = name
6,
11,
6,
2,
1421856776
] |
def __repr__(self):
    """ Return a representation of a bridge object, e.g. <Bridge: br0>. """
    return "<Bridge: %s>" % self.name
6,
11,
6,
2,
1421856776
] |
def delif(self, iname):
    """ Delete an interface from the bridge.

    iname -- name of the interface to detach (via `brctl delif`).
    """
    _runshell([brctlexe, 'delif', self.name, iname],
        "Could not delete interface %s from %s." % (iname, self.name))
6,
11,
6,
2,
1421856776
] |
def stp(self, val=True):
    """ Turn STP protocol on/off.

    val -- True enables STP on the bridge, False disables it.
    """
    state = 'on' if val else 'off'
    _runshell([brctlexe, 'stp', self.name, state],
        "Could not set stp on %s." % self.name)
6,
11,
6,
2,
1421856776
] |
def setbridgeprio(self, prio):
    """ Set bridge priority value.

    prio -- priority value passed to `brctl setbridgeprio`.
    """
    _runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
        "Could not set bridge priority in %s." % self.name)
6,
11,
6,
2,
1421856776
] |
def setfd(self, time):
    """ Set bridge forward delay time value.

    time -- delay value passed to `brctl setfd`.
    """
    _runshell([brctlexe, 'setfd', self.name, str(time)],
        "Could not set forward delay in %s." % self.name)
6,
11,
6,
2,
1421856776
] |
def setmaxage(self, time):
    """ Set bridge max message age time.

    time -- age value passed to `brctl setmaxage`.
    """
    _runshell([brctlexe, 'setmaxage', self.name, str(time)],
        "Could not set max message age in %s." % self.name)
6,
11,
6,
2,
1421856776
] |
def setpathcost(self, port, cost):
    """ Set port path cost value for STP protocol.

    port -- interface name; cost -- value passed to `brctl setpathcost`.
    """
    _runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
        "Could not set path cost in port %s in %s." % (port, self.name))
6,
11,
6,
2,
1421856776
] |
def setportprio(self, port, prio):
    """ Set port priority value.

    port -- interface name; prio -- value passed to `brctl setportprio`.
    """
    _runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
        "Could not set priority in port %s in %s." % (port, self.name))
6,
11,
6,
2,
1421856776
] |
def getid(self):
    """ Return the bridge id value. """
    # _show() returns the parsed `brctl show` row for this bridge;
    # column 1 holds the bridge id -- confirm column layout in _show().
    return self._show()[1]
6,
11,
6,
2,
1421856776
] |
def getstp(self):
    """ Return if STP protocol is enabled. """
    # Column 2 of the parsed `brctl show` row is the 'yes'/'no' STP
    # flag -- confirm column layout in _show().
    return self._show()[2] == 'yes'
6,
11,
6,
2,
1421856776
] |
def showstp(self):
    """ Return STP information. (Not implemented yet.) """
    raise NotImplementedError()
6,
11,
6,
2,
1421856776
] |
def addbr(self, name):
    """ Create a bridge and set the device up.

    Returns a Bridge object wrapping the newly created bridge.
    """
    _runshell([brctlexe, 'addbr', name],
        "Could not create bridge %s." % name)
    # Bring the new bridge device up so it is usable immediately.
    _runshell([ipexe, 'link', 'set', 'dev', name, 'up'],
        "Could not set link up for %s." % name)
    return Bridge(name)
6,
11,
6,
2,
1421856776
] |
def showall(self):
    """ Return a list of all available bridges.

    Parses `brctl show` output: drops the header row and filters out
    single-column continuation rows (extra interfaces of a bridge).
    NOTE(review): written for Python 2 -- under Python 3 the final
    map() would return an iterator, not a list.
    """
    p = _runshell([brctlexe, 'show'],
        "Could not show bridges.")
    wlist = map(str.split, p.stdout.read().splitlines()[1:])
    brwlist = filter(lambda x: len(x) != 1, wlist)
    brlist = map(lambda x: x[0], brwlist)
    return map(Bridge, brlist)
6,
11,
6,
2,
1421856776
] |
def test_free_basic(self):
    """A logical FreeSpaceDevice reports the expected flags, empty
    format, type string and parent disk."""
    free = FreeSpaceDevice(free_size=Size("8 GiB"), dev_id=0, start=0, end=1, parents=[MagicMock(type="disk")], logical=True)
    self.assertTrue(free.is_logical)
    self.assertFalse(free.is_extended)
    self.assertFalse(free.is_primary)
    self.assertEqual(len(free.children), 0)
    self.assertEqual(free.type, "free space")
    self.assertIsNotNone(free.format)
    self.assertIsNone(free.format.type)
    self.assertEqual(free.disk, free.parents[0])
141,
23,
141,
20,
1399461349
] |
def test_free_disk(self):
    """FreeSpaceDevice.disk resolves the backing disk both for free
    space directly on a disk and for free space inside a VG."""
    # free space on a disk
    disk = MagicMock(type="disk", children=[], is_disk=True, format=MagicMock(type=None))
    free = FreeSpaceDevice(free_size=Size("8 GiB"), dev_id=0, start=0, end=1, parents=[disk])
    self.assertEqual(free.disk, disk)
    # free space in a vg
    parent = MagicMock(type="lvmvg", children=[], is_disk=False, format=MagicMock(type=None),
                       parents=[MagicMock(type="partition", children=[MagicMock()], is_disk=False, parents=[disk],
                                format=MagicMock(type="lvmpv"))])
    free = FreeSpaceDevice(free_size=Size("8 GiB"), dev_id=0, start=0, end=1, parents=[parent])
    self.assertEqual(free.disk, disk)
141,
23,
141,
20,
1399461349
] |
def test_resizable(self):
    """device_resizable covers: swap (unsupported), mounted device
    (refused), resizable device+format, non-resizable format, and an
    LV with snapshots (refused)."""
    with patch("blivetgui.blivet_utils.BlivetUtils.blivet_reset", lambda _: True):
        storage = BlivetUtils()
        device = MagicMock(type="", size=Size("1 GiB"), protected=False, format_immutable=False, children=[])
        device.format = MagicMock(exists=True, system_mountpoint=None)
        device.format.return_value = None
        # swap is not resizable
        device.format.configure_mock(type="swap")
        res = storage.device_resizable(device)
        self.assertFalse(res.resizable)
        self.assertEqual(res.error, _("Resizing of swap format is currently not supported"))
        self.assertEqual(res.min_size, Size("1 MiB"))
        self.assertEqual(res.max_size, Size("1 GiB"))
        # mounted devices are not resizable
        device.format.configure_mock(type="ext4", system_mountpoint="/")
        res = storage.device_resizable(device)
        self.assertFalse(res.resizable)
        self.assertEqual(res.error, _("Mounted devices cannot be resized"))
        self.assertEqual(res.min_size, Size("1 MiB"))
        self.assertEqual(res.max_size, Size("1 GiB"))
        # resizable device
        device.configure_mock(resizable=True, max_size=Size("2 GiB"), min_size=Size("500 MiB"))
        device.format.configure_mock(resizable=True, type="ext4", system_mountpoint=None)
        res = storage.device_resizable(device)
        self.assertTrue(res.resizable)
        self.assertIsNone(res.error)
        self.assertEqual(res.min_size, Size("500 MiB"))
        self.assertEqual(res.max_size, Size("2 GiB"))
        # resizable device and non-resizable format
        device.configure_mock(resizable=True, max_size=Size("2 GiB"), min_size=Size("500 MiB"))
        device.format.configure_mock(resizable=False, type="ext4")
        res = storage.device_resizable(device)
        self.assertFalse(res.resizable)
        self.assertIsNone(res.error)
        self.assertEqual(res.min_size, Size("1 MiB"))
        self.assertEqual(res.max_size, Size("1 GiB"))
        # LV with snapshot -> not resizable
        with patch("blivetgui.blivet_utils.BlivetUtils._has_snapshots", lambda _, device: True):
            device.configure_mock(type="lvmlv", resizable=True, max_size=Size("2 GiB"), min_size=Size("500 MiB"))
            device.format.configure_mock(resizable=True, type="ext4")
            res = storage.device_resizable(device)
            self.assertFalse(res.resizable)
            self.assertIsNotNone(res.error)
            self.assertEqual(res.min_size, Size("1 MiB"))
            self.assertEqual(res.max_size, Size("1 GiB"))
141,
23,
141,
20,
1399461349
] |
def __init__(self, p_filename):
    """
    Load the given paradigm
    p_filename is a string representing the filename of a paradigm xml file
    """
    # Store input paradigm filename
    self.loadParadigm(p_filename)
    # Default presentation settings: HTML written to output.html, styled
    # with simple.css.  (A previous comment claimed "text output, to
    # terminal", which did not match these values.)
    self.format = "html"
    self.output = "output.html"
    self.css = "simple.css"
2,
11,
2,
1,
1479284521
] |
def show(self, p_string):
    """
    Process and display the given query

    Parses p_string, builds a Sentence presentation, and writes the
    result as HTML or plain text to self.output (or to the terminal
    when self.output == "term").  All failures are printed and
    swallowed.
    """
    try:
        # parse the query
        parse = ParadigmQuery(p_string)
    except:
        print "Could not parse query."
        return
    try:
        # Fetch the parsed tree and make presentation
        result = Sentence(self, parse.getTree())
        # Check that a presentation actually exists
        # NOTE(review): `Error` is not defined in this view; this line
        # would raise NameError (still caught below) -- confirm intent.
        if result == None:
            raise Error
    except:
        print "Sorry, no result can be returned"
        return
    try:
        # Print HTML output if format is set, otherwise plain text
        if self.format == "html":
            output = '<html>\n'
            # Include CSS if we need to
            if self.css <> None:
                output += '<link rel="stylesheet" href="'
                output += self.css
                output += '" type="text/css" media="screen" />\n'
            output += '<body>'
            output += "<table cellspacing=\"0\" cellpadding=\"0\">"
            output += result.getHTML()
            output += "</table>\n"
            output += '</body></html>\n'
        else:
            output = result.getText()
    except:
        output = None
        print "--no output--"
        return
    # Print to terminal if output is set, otherwise to file
    if self.output == "term":
        print output
    else:
        print "Output written to file:", self.output
        f = open(self.output, 'w')
        f.write(output)
    # Return happily
    return
2,
11,
2,
1,
1479284521
] |
def setCSS(self, p_string=None):
"""
Set the file location for a Cascading Stylesheet: None or filename
This allows for simple formatting
"""
if p_string <> None:
print "Using CSS file:", p_string
self.output = p_string | RensaProject/nodebox_linguistics_extended | [
2,
11,
2,
1,
1479284521
] |
def loadParadigm(self, p_filename ):
"""
Load the given paradigm (XML file)
Attributes are stored in self.attributes
Data are stored in self.data | RensaProject/nodebox_linguistics_extended | [
2,
11,
2,
1,
1479284521
] |
def __init__(self, p_paradigm, p_tree):
    """
    p_paradigm is the given paradigm (attributes and data)
    p_tree is the query tree

    Wraps the query tree in the presentation object matching its type:
    O (sentence), D (domain), H (hierarchy) or T (table).
    """
    # store parameters
    self.paradigm = p_paradigm
    self.tree = p_tree
    # discover the type
    self.type = self.getType(self.tree)
    # Handle each possible type
    if self.type == 'O':
        self.item = Sentence(self.paradigm, self.tree[0])
    if self.type == 'D':
        self.item = Domain(self.paradigm, self.tree)
    if self.type == 'H':
        self.item = Hierarchy(self.paradigm, self.tree)
    if self.type == 'T':
        self.item = Table(self.paradigm, self.tree)
2,
11,
2,
1,
1479284521
] |
def getHTML(self):
    """
    Returns values in html (table) form
    (delegates to the wrapped presentation item)
    """
    return self.item.getHTML()
2,
11,
2,
1,
1479284521
] |
def getText(self):
    """
    Returns values in plain text form
    (delegates to the wrapped presentation item)
    """
    return self.item.getText()
2,
11,
2,
1,
1479284521
] |
def getMaxWidth(self):
    """
    Returns the width in number of characters
    (delegates to the wrapped presentation item)
    """
    return self.item.getMaxWidth()
2,
11,
2,
1,
1479284521
] |
def getDepth(self):
    """
    Get the depth
    (delegates to the wrapped presentation item)
    """
    return self.item.getDepth()
2,
11,
2,
1,
1479284521
] |
def __init__(self, p_paradigm, p_tree):
    """
    p_paradigm is the given paradigm (attributes and data)
    p_tree is the query tree
    """
    self.paradigm = p_paradigm
    # Validate that this is a domain
    assert self.getType(p_tree) == 'D'
    # Store the attribute
    self.attribute = p_tree[0]
    self.error = None
    # Check that the requested attribute is available; a failed lookup
    # records (and prints) a readable error instead of raising.
    try:
        self.paradigm.attributes[self.attribute]
    except KeyError:
        self.error = "I couldn't find this attribute: " + self.attribute
        print self.error
2,
11,
2,
1,
1479284521
] |
def getList(self):
    """
    Return the domain in list form
    (the list of values for this attribute, as loaded from the paradigm)
    """
    return self.paradigm.attributes[self.attribute]
2,
11,
2,
1,
1479284521
] |
def getHorizontalHTML(self,p_parentSpan=1):
    """
    Return a horizontal html table row for this domain, with the run of
    cells repeated p_parentSpan times (once per parent cell above it).
    """
    cells = "".join("<td>" + entry + "</td>" for entry in self.getList())
    return "<tr>" + cells * p_parentSpan + "</tr>"
2,
11,
2,
1,
1479284521
] |
def getConditions(self):
    """
    Return a list of conditions (one {attribute: value} mapping per
    cell of this domain).
    """
    return [{self.attribute: entry} for entry in self.getList()]
2,
11,
2,
1,
1479284521
] |
def getSpan(self):
    """
    Get the span of this domain, i.e. how many elements it contains.
    """
    values = self.getList()
    return len(values)
2,
11,
2,
1,
1479284521
] |
def __init__(self, p_paradigm, p_tree):
    """
    p_paradigm is the given paradigm (attributes and data)
    p_tree is the tree representation of this part of the query (Tree)

    A hierarchy has a Domain as its root and any Sentence as its leaf.
    """
    self.paradigm = p_paradigm
    self.error = None
    self.tree = p_tree
    # Validate that this is a Hierarchy
    assert self.getType(p_tree) == 'H'
    # Validate that the root is a Domain
    assert self.getType(p_tree[0]) == 'D'
    # Set the root and the leaf
    self.root = Domain(self.paradigm, p_tree[0])
    self.leaf = Sentence(self.paradigm, p_tree[1])
2,
11,
2,
1,
1479284521
] |
def getHTML(self):
    """
    Return a html table for this hierarchy

    Each root value becomes a row-spanning cell followed by the leaf's
    cells; the leading "<tr>" of the leaf HTML is stripped ([4:]) so
    the rows merge.
    NOTE(review): self.root[index] indexes the Domain object directly,
    which relies on Domain supporting item access -- confirm.
    """
    ret_string = ""
    for index in range(len(self.root.getList())):
        leafCells = self.leaf.getHTML()[4:]
        ret_string += "<tr><td rowspan=\"" + str(self.leaf.getSpan()) + "\">" + self.root[index] \
            + "</td>" + leafCells
    return ret_string
2,
11,
2,
1,
1479284521
] |
def getText(self):
        """
        Render this hierarchy as plain text: one row per root value,
        with the leaf's text aligned in a column to the right.
        """
        # Column widths used to pad the rendering.
        root_width = self.root.getMaxWidth()
        leaf_width = self.leaf.getMaxWidth()
        # Continuation lines of the leaf text are indented past the
        # root column plus the single separating space.
        continuation = "\n" + " "*(root_width+1)
        rendered = ""
        for idx in range(len(self.root.getList())):
            leaf_text = self.leaf.getText().ljust(leaf_width).replace('\n', continuation)
            rendered += self.root[idx].ljust(root_width) + " " + leaf_text + "\n"
        # Drop rows that contain nothing but spaces.
        re_blank = re.compile('\n[ ]+\n')
        return re_blank.sub('\n', rendered)
2,
11,
2,
1,
1479284521
] |
def getMaxWidth(self):
        """
        Return the maximum width (in chars) of this hierarchy: root
        column plus one separating space plus leaf column.
        """
        widths = (self.root.getMaxWidth(), self.leaf.getMaxWidth())
        return sum(widths) + 1
2,
11,
2,
1,
1479284521
] |
def getSpan(self):
        """
        Return the HTML-table span of this hierarchy: the product of
        the root and leaf spans.
        """
        root_span = self.root.getSpan()
        leaf_span = self.leaf.getSpan()
        return root_span * leaf_span
2,
11,
2,
1,
1479284521
] |
def __init__(self, p_paradigm, p_tree):
        """
        Build a table node.

        p_paradigm is the given paradigm (attributes and data)
        p_tree is the tree representation of this part of the query (Tree)
        """
        self.paradigm, self.error, self.tree = p_paradigm, None, p_tree
        # A table node must be tagged 'T'.
        assert self.getType(p_tree) == 'T'
        # Children are, in order: horizontal header, vertical header,
        # and the cell expression.
        self.horizontal = Sentence(self.paradigm, p_tree[0])
        self.vertical = Sentence(self.paradigm, p_tree[1])
        self.cells = Sentence(self.paradigm, p_tree[2])
2,
11,
2,
1,
1479284521
] |
def getHTML(self):
        """
        Build the HTML pieces of this table operation: a dead corner
        cell, horizontal and vertical headers, and one data cell per
        (row condition, column condition) pair.

        NOTE(review): as stored here the function builds dead_cell,
        horizontal_header, vertical_header and str_cells but has no
        return statement — the snippet may be truncated; confirm
        against the original file.
        """
        # Dead corner cell spanning the vertical header's width and the
        # horizontal header's height.
        dead_cell = "<tr><td colspan=\"" + str(self.vertical.getDepth()) \
            + "\" rowspan=\"" + str(self.horizontal.getDepth()) \
            + "\"></td>"
        # Horizontal header: drop the leading "<tr>" (4 chars) and turn
        # data cells into header cells.
        # NOTE(review): .replace('td','th') rewrites every literal "td",
        # including any inside cell text — confirm this is acceptable.
        horizontal_header = self.horizontal.getHorizontalHTML()[4:].replace('td','th')
        # Vertical header, likewise rendered with <th> cells.
        vertical_header = self.vertical.getHTML().replace('td','th')
        str_cells = ""
        # NOTE(review): 'conditions' is never read below — dead code?
        conditions = {}
        # One condition dict per table row.
        conditions_v = self.vertical.getConditions()
        # for each row
        for cond_v in conditions_v:
            str_cells += "<tr>"
            # One condition dict per table column.
            conditions_h = self.horizontal.getConditions()
            # For each column
            for cond_h in conditions_h:
                # Cell data is looked up under the merged row+column
                # conditions (dictJoin: row conditions win on overlap).
                cell_data = self.getData(self.cells.tree, dictJoin(cond_v,cond_h))
                # Add the cell
                str_cells += "<td>" + cell_data + "</td>"
            # End the row
            str_cells += "</tr>"
2,
11,
2,
1,
1479284521
] |
def getHorizontalHTML(self,p_parentSpan=1):
        """
        Not meaningful for a table node: warn and return None.

        Tables render themselves via getHTML(); this stub exists only
        to satisfy the shared node interface.
        """
        # Function-call form of print: identical under Python 2 for a
        # single argument, and also valid on Python 3.
        print("?: getHorizontalHTML() called on a table.")
        return None
2,
11,
2,
1,
1479284521
] |
def getConditions(self):
        """
        Not meaningful for a table node: warn and return None.

        Condition lists come from header sentences, never from the
        table itself.
        """
        # Function-call form of print: identical under Python 2 for a
        # single argument, and also valid on Python 3.
        print("?: getConditions() called on a table. I don't think so.")
        return None
2,
11,
2,
1,
1479284521
] |
def getSpan(self):
        """
        Not meaningful for a table node: warn and return None.

        Spans are a property of header cells; a table has no single
        span of its own.
        """
        # Function-call form of print: identical under Python 2 for a
        # single argument, and also valid on Python 3.
        print("WTF: getSpan() called on a table.")
        return None
2,
11,
2,
1,
1479284521
] |
def dictJoin(dict1,dict2):
        """
        Return a new dict merging dict1 and dict2.

        If there is any key overlap, dict1 wins (just make sure this
        doesn't happen). Unlike the earlier in-place version, neither
        input dictionary is modified.
        """
        # Copy dict2 first, then overlay dict1 so dict1's values win.
        merged = dict(dict2)
        merged.update(dict1)
        return merged
2,
11,
2,
1,
1479284521
] |
def test_index(self):
        """The index page responds with HTTP 200."""
        resp = self.client.get('/')
        self.assertEqual(resp.status_code, 200)
270,
117,
270,
77,
1370983197
] |
def test_art(self):
        """The art page responds with HTTP 200."""
        resp = self.client.get('/art/')
        self.assertEqual(resp.status_code, 200)
270,
117,
270,
77,
1370983197
] |
def test_donate(self):
        """The donate page responds with HTTP 200."""
        resp = self.client.get('/donate/')
        self.assertEqual(resp.status_code, 200)
270,
117,
270,
77,
1370983197
] |
def test_master_keys(self):
        """The master-keys page responds with HTTP 200."""
        resp = self.client.get('/master-keys/')
        self.assertEqual(resp.status_code, 200)
270,
117,
270,
77,
1370983197
] |
def test_feeds(self):
        """The feeds page responds with HTTP 200."""
        resp = self.client.get('/feeds/')
        self.assertEqual(resp.status_code, 200)
270,
117,
270,
77,
1370983197
] |
def __init__(self, *args, **kwargs):
        """
        Accept an optional 'hard' keyword (default False) and forward
        all remaining arguments to the parent initializer.
        """
        # Pop 'hard' before delegating so the parent never sees it.
        self.hard = kwargs.pop('hard', False)
        super().__init__(*args, **kwargs)
158,
13,
158,
2,
1451246258
] |
def apply(self, source, dest):
        """Copy source to dest, preserving metadata (mode, mtime) via copy2."""
        shutil.copy2(source, dest)
158,
13,
158,
2,
1451246258
] |
def remove(self, source, dest):
        """
        Re-create dest from source: a full metadata-preserving copy
        when self.hard is set, otherwise a symlink pointing at source.

        NOTE(review): despite the name, nothing is deleted here —
        presumably this restores a file when it is removed from the
        managed store; confirm against the callers.
        """
        if not self.hard:
            os.symlink(source, dest)
        else:
            shutil.copy2(source, dest)
158,
13,
158,
2,
1451246258
] |
Subsets and Splits