Columns:
Unnamed: 0    int64    (values 0 to 10k)
function      string   (lengths 79 to 138k)
label         string   (20 classes)
info          string   (lengths 42 to 261)
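Each row pairs a Python function in which one exception class is masked by the __HOLE__ token (function) with the masked class name (label) and the source file it was mined from (info). A minimal sketch of reading one row, assuming the rows have been exported to CSV; the filename below is a placeholder and not part of the dataset:

import pandas as pd

# "exception_dataset.csv" is an illustrative filename, not part of the dataset.
df = pd.read_csv("exception_dataset.csv")   # columns: Unnamed: 0, function, label, info

row = df.iloc[0]
masked_source = row["function"]   # Python source containing the __HOLE__ placeholder
answer = row["label"]             # one of 20 exception classes, e.g. ImportError
origin = row["info"]              # repository path the function was taken from

# Restore the original except clause by filling the mask with the label.
print(masked_source.replace("__HOLE__", answer))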
5,700
def get_default_hub(): """Get default hub implementation """ names = [hub_name] if hub_name else ['pyuv_cffi', 'pyuv', 'epoll'] for name in names: try: module = importlib.import_module('guv.hubs.{}'.format(name)) log.debug('Hub: use {}'.format(name)) return module except __HOLE__: # try the next possible hub pass
ImportError
dataset/ETHPy150Open veegee/guv/guv/hubs/hub.py/get_default_hub
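For legibility, the function from row 5,700 with its line breaks restored and the mask filled in from its label (ImportError). The indentation is reconstructed and therefore approximate, and names such as hub_name, log, and importlib come from the surrounding module in the original file:

def get_default_hub():
    """Get default hub implementation
    """
    names = [hub_name] if hub_name else ['pyuv_cffi', 'pyuv', 'epoll']
    for name in names:
        try:
            module = importlib.import_module('guv.hubs.{}'.format(name))
            log.debug('Hub: use {}'.format(name))
            return module
        except ImportError:
            # try the next possible hub
            pass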
5,701
def get_hub(): """Get the current event hub singleton object .. note :: |internal| """ try: hub = _threadlocal.hub except __HOLE__: # instantiate a Hub try: _threadlocal.Hub except AttributeError: use_hub() hub = _threadlocal.hub = _threadlocal.Hub() return hub
AttributeError
dataset/ETHPy150Open veegee/guv/guv/hubs/hub.py/get_hub
5,702
def to_python(self, value): if value is None or value == '': return StreamValue(self.stream_block, []) elif isinstance(value, StreamValue): return value elif isinstance(value, string_types): try: unpacked_value = json.loads(value) except ValueError: # value is not valid JSON; most likely, this field was previously a # rich text field before being migrated to StreamField, and the data # was left intact in the migration. Return an empty stream instead # (but keep the raw text available as an attribute, so that it can be # used to migrate that data to StreamField) return StreamValue(self.stream_block, [], raw_text=value) if unpacked_value is None: # we get here if value is the literal string 'null'. This should probably # never happen if the rest of the (de)serialization code is working properly, # but better to handle it just in case... return StreamValue(self.stream_block, []) return self.stream_block.to_python(unpacked_value) else: # See if it looks like the standard non-smart representation of a # StreamField value: a list of (block_name, value) tuples try: [None for (x, y) in value] except (TypeError, __HOLE__): # Give up trying to make sense of the value raise TypeError("Cannot handle %r (type %r) as a value of StreamField" % (value, type(value))) # Test succeeded, so return as a StreamValue-ified version of that value return StreamValue(self.stream_block, value)
ValueError
dataset/ETHPy150Open torchbox/wagtail/wagtail/wagtailcore/fields.py/StreamField.to_python
5,703
def __virtual__(): ''' Only work on systems that are a proxy minion ''' try: if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'rest_sample': return __virtualname__ except __HOLE__: return (False, 'The rest_package execution module failed to load. Check the proxy key in pillar.') return (False, 'The rest_package execution module failed to load: only works on a rest_sample proxy minion.')
KeyError
dataset/ETHPy150Open saltstack/salt/salt/modules/rest_package.py/__virtual__
5,704
def setUp(self): self.moderation = ModerationManager() self.moderation.register(ModelWithSlugField) self.filter_moderated_objects = ModelWithSlugField.objects.\ filter_moderated_objects def filter_moderated_objects(query_set): from moderation.models import MODERATION_STATUS_PENDING,\ MODERATION_STATUS_REJECTED exclude_pks = [] for obj in query_set: try: if obj.moderated_object.moderation_status\ in [MODERATION_STATUS_PENDING, MODERATION_STATUS_REJECTED]\ and obj.__dict__ == \ obj.moderated_object.changed_object.__dict__: exclude_pks.append(object.pk) except __HOLE__: pass return query_set.exclude(pk__in=exclude_pks) setattr(ModelWithSlugField.objects, 'filter_moderated_objects', filter_moderated_objects)
ObjectDoesNotExist
dataset/ETHPy150Open dominno/django-moderation/tests/tests/unit/testregister.py/IntegrityErrorRegressionTestCase.setUp
5,705
def repeater(index): """repeats the last command with the given index """ global __last_commands__ try: call_data = __last_commands__[index] return call_data[0](*call_data[1], **call_data[2]) except __HOLE__: return None
IndexError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/toolbox.py/repeater
5,706
@classmethod def generate_repr_of_all_references(cls, generate_gpu=True, generate_ass=True, skip_existing=False): """generates all representations of all references of this scene """ from anima.ui.progress_dialog import ProgressDialogManager from anima.env.mayaEnv import Maya, repr_tools, auxiliary reload(auxiliary) reload(repr_tools) paths_visited = [] versions_to_visit = [] versions_cannot_be_published = [] # generate a sorted version list # and visit each reference only once pdm = ProgressDialogManager() use_progress_window = False if not pm.general.about(batch=1): use_progress_window = True all_refs = pm.listReferences(recursive=True) pdm.use_ui = use_progress_window caller = pdm.register(len(all_refs), 'List References') for ref in reversed(all_refs): ref_path = str(ref.path) caller.step(message=ref_path) if ref_path not in paths_visited: v = ref.version if v is not None: paths_visited.append(ref_path) versions_to_visit.append(v) response = pm.confirmDialog( title='Do Create Representations?', message='Create all Repr. for all %s FileReferences?' % len(versions_to_visit), button=['Yes', 'No'], defaultButton='No', cancelButton='No', dismissString='No' ) if response == 'No': return # register a new caller caller = pdm.register(max_iteration=len(versions_to_visit), title='Generate Reprs') m_env = Maya() source_version = m_env.get_current_version() gen = repr_tools.RepresentationGenerator() # open each version from stalker import Version for v in versions_to_visit: local_generate_gpu = generate_gpu local_generate_ass = generate_ass # check if this is a repr if '@' in v.take_name: # use the parent v = v.parent if not v: continue if skip_existing: # check if there is a GPU or ASS repr # generated from this version child_versions = Version.query.filter(Version.parent == v).all() for cv in child_versions: if local_generate_gpu is True and '@GPU' in cv.take_name: local_generate_gpu = False if local_generate_ass is True and '@ASS' in cv.take_name: local_generate_ass = False gen.version = v # generate representations if local_generate_gpu: try: gen.generate_gpu() except RuntimeError: if v not in versions_cannot_be_published: versions_cannot_be_published.append(v) if local_generate_ass: try: gen.generate_ass() except __HOLE__: if v not in versions_cannot_be_published: versions_cannot_be_published.append(v) caller.step() # now open the source version again m_env.open(source_version, force=True, skip_update_check=True) # and generate representation for the source gen.version = source_version # generate representations if not versions_cannot_be_published: if generate_gpu: gen.generate_gpu() if generate_ass: gen.generate_ass() else: pm.confirmDialog( title='Error', message='The following versions can not be published ' '(check script editor):\n\n%s' % ( '\n'.join( map(lambda x: x.nice_name, versions_cannot_be_published) ) ), button=['OK'], defaultButton='OK', cancelButton='OK', dismissString='OK' ) pm.error( '\n'.join( map(lambda x: x.absolute_full_path, versions_cannot_be_published) ) )
RuntimeError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/toolbox.py/Reference.generate_repr_of_all_references
5,707
@classmethod def select_zero_uv_area_faces(cls): """selects faces with zero UV area """ def area(p): return 0.5 * abs(sum(x0 * y1 - x1 * y0 for ((x0, y0), (x1, y1)) in segments(p))) def segments(p): return zip(p, p[1:] + [p[0]]) all_meshes = pm.ls( [node.getShape() for node in pm.ls(sl=1)], type='mesh' ) mesh_count = len(all_meshes) from anima.ui.progress_dialog import ProgressDialogManager pdm = ProgressDialogManager() if not pm.general.about(batch=1) and mesh_count: pdm.use_ui = True caller = pdm.register(mesh_count, 'check_uvs()') faces_with_zero_uv_area = [] for node in all_meshes: all_uvs = node.getUVs() for i in range(node.numFaces()): uvs = [] try: for j in range(node.numPolygonVertices(i)): #uvs.append(node.getPolygonUV(i, j)) uv_id = node.getPolygonUVid(i, j) uvs.append((all_uvs[0][uv_id], all_uvs[1][uv_id])) if area(uvs) == 0.0: #meshes_with_zero_uv_area.append(node) #break faces_with_zero_uv_area.append( '%s.f[%s]' % (node.fullPath(), i) ) except __HOLE__: faces_with_zero_uv_area.append( '%s.f[%s]' % (node.fullPath(), i) ) caller.step() if len(faces_with_zero_uv_area) == 0: pm.warning('No Zero UV area polys found!!!') else: pm.select(faces_with_zero_uv_area)
RuntimeError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/toolbox.py/Modeling.select_zero_uv_area_faces
5,708
@classmethod def reset_tweaks(cls): """Resets the tweaks on the selected deformed objects """ for obj in pm.ls(sl=1): for tweak_node in pm.ls(obj.listHistory(), type=pm.nt.Tweak): try: for i in tweak_node.pl[0].cp.get(mi=1): tweak_node.pl[0].cv[i].vx.set(0) tweak_node.pl[0].cv[i].vy.set(0) tweak_node.pl[0].cv[i].vz.set(0) except __HOLE__: try: for i in tweak_node.vl[0].vt.get(mi=1): tweak_node.vl[0].vt[i].vx.set(0) tweak_node.vl[0].vt[i].vy.set(0) tweak_node.vl[0].vt[i].vz.set(0) except TypeError: pass
TypeError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/toolbox.py/Rigging.reset_tweaks
5,709
@classmethod def convert_to_linear(cls): """adds a gamma_gain node in between the selected nodes outputs to make the result linear """ # # convert to linear # selection = pm.ls(sl=1) for file_node in selection: # get the connections outputs = file_node.outputs(plugs=True) if not len(outputs): continue # and insert a mip_gamma_gain gamma_node = pm.createNode('mip_gamma_gain') gamma_node.setAttr('gamma', 2.2) gamma_node.setAttr('reverse', True) # connect the file_node to gamma_node try: file_node.outValue >> gamma_node.input file_node.outValueA >> gamma_node.inputA except __HOLE__: file_node.outColor >> gamma_node.input # do all the connections from the output of the gamma for output in outputs: try: gamma_node.outValue >> output except RuntimeError: gamma_node.outValueA >> output pm.select(selection)
AttributeError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/toolbox.py/Render.convert_to_linear
5,710
@classmethod def enable_matte(cls, color=0): """enables matte on selected objects """ # # Enable Matte on Selected Objects # colors = [ [0, 0, 0, 0], # Not Visible [1, 0, 0, 0], # Red [0, 1, 0, 0], # Green [0, 0, 1, 0], # Blue [0, 0, 0, 1], # Alpha ] arnold_shaders = ( pm.nt.AiStandard, pm.nt.AiHair, pm.nt.AiSkin, pm.nt.AiUtility ) for node in pm.ls(sl=1, dag=1, type=[pm.nt.Mesh, pm.nt.NurbsSurface, 'aiStandIn']): obj = node #if isinstance(node, pm.nt.Mesh): # obj = node #elif isinstance(node, pm.nt.Transform): # obj = node.getShape() shading_nodes = pm.listConnections(obj, type='shadingEngine') for shadingNode in shading_nodes: shader = shadingNode.attr('surfaceShader').connections()[0] if isinstance(shader, arnold_shaders): try: pm.editRenderLayerAdjustment(shader.attr("aiEnableMatte")) pm.editRenderLayerAdjustment(shader.attr("aiMatteColor")) pm.editRenderLayerAdjustment(shader.attr("aiMatteColorA")) shader.attr("aiEnableMatte").set(1) shader.attr("aiMatteColor").set(colors[color][0:3], type='double3') shader.attr("aiMatteColorA").set(colors[color][3]) except __HOLE__ as e: # there is some connections print(str(e))
RuntimeError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/toolbox.py/Render.enable_matte
5,711
@classmethod def enable_subdiv(cls): """enables subdiv on selected objects """ # # Set SubDiv to CatClark on Selected nodes # for node in pm.ls(sl=1): shape = node.getShape() try: shape.aiSubdivIterations.set(2) shape.aiSubdivType.set(1) shape.aiSubdivPixelError.set(0) except __HOLE__: pass
AttributeError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/toolbox.py/Render.enable_subdiv
5,712
@classmethod def normalize_sss_weights(cls): """normalizes the sss weights so their total weight is 1.0 if a aiStandard is assigned to the selected object it searches for an aiSkin in the emission channel. the script considers 0.7 as the highest diffuse value for aiStandard """ # get the shader of the selected object assigned_shader = pm.ls( pm.ls(sl=1)[0].getShape().outputs(type='shadingEngine')[0].inputs(), mat=1 )[0] if assigned_shader.type() == 'aiStandard': sss_shader = assigned_shader.attr('emissionColor').inputs()[0] diffuse_weight = assigned_shader.attr('Kd').get() else: sss_shader = assigned_shader diffuse_weight = 0 def get_attr_or_texture(attr): if attr.inputs(): # we probably have a texture assigned # so use its multiply attribute texture = attr.inputs()[0] attr = texture.attr('multiply') if isinstance(texture, pm.nt.AiImage): attr = texture.attr('multiply') elif isinstance(texture, pm.nt.File): attr = texture.attr('colorGain') return attr shallow_attr = get_attr_or_texture( sss_shader.attr('shallowScatterWeight') ) mid_attr = get_attr_or_texture(sss_shader.attr('midScatterWeight')) deep_attr = get_attr_or_texture(sss_shader.attr('deepScatterWeight')) shallow_weight = shallow_attr.get() if isinstance(shallow_weight, tuple): shallow_weight = ( shallow_weight[0] + shallow_weight[1] + shallow_weight[2] ) / 3.0 mid_weight = mid_attr.get() if isinstance(mid_weight, tuple): mid_weight = ( mid_weight[0] + mid_weight[1] + mid_weight[2] ) / 3.0 deep_weight = deep_attr.get() if isinstance(deep_weight, tuple): deep_weight = ( deep_weight[0] + deep_weight[1] + deep_weight[2] ) / 3.0 total_sss_weight = shallow_weight + mid_weight + deep_weight mult = (1 - diffuse_weight / 0.7) / total_sss_weight try: shallow_attr.set(shallow_weight * mult) except RuntimeError: w = shallow_weight * mult shallow_attr.set(w, w, w) try: mid_attr.set(mid_weight * mult) except RuntimeError: w = mid_weight * mult mid_attr.set(w, w, w) try: deep_attr.set(deep_weight * mult) except __HOLE__: w = deep_weight * mult deep_attr.set(w, w, w)
RuntimeError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/toolbox.py/Render.normalize_sss_weights
5,713
@classmethod def move_cache_files(cls, source_driver, target_driver): """moves the selected cache files to another location :param source_driver: :param target_driver: :return: """ # # Move fur caches to new server # import os import shutil import glob from maya import OpenMayaUI from shiboken import wrapInstance from anima.ui import progress_dialog maya_main_window = wrapInstance( long(OpenMayaUI.MQtUtil.mainWindow()), progress_dialog.QtGui.QWidget ) pdm = ProgressDialogManager(parent=maya_main_window) selected_nodes = pm.ls(sl=1) caller = pdm.register(len(selected_nodes), title='Moving Cache Files') for node in selected_nodes: ass_node = node.getShape() if not isinstance(ass_node, (pm.nt.AiStandIn, pm.nt.AiVolume)): continue if isinstance(ass_node, pm.nt.AiStandIn): ass_path = ass_node.dso.get() elif isinstance(ass_node, pm.nt.AiVolume): ass_path = ass_node.filename.get() ass_path = os.path.normpath( os.path.expandvars(ass_path) ) # give info to user caller.title = 'Moving: %s' % ass_path # check if it is in the source location if source_driver not in ass_path: continue # check if it contains .ass.gz in its path if isinstance(ass_node, pm.nt.AiStandIn): if '.ass.gz' not in ass_path: continue elif isinstance(ass_node, pm.nt.AiVolume): if '.vdb' not in ass_path: continue # get the dirname ass_source_dir = os.path.dirname(ass_path) ass_target_dir = ass_source_dir.replace(source_driver, target_driver) # create the intermediate folders at destination try: os.makedirs( ass_target_dir ) except __HOLE__: # dir already exists pass # get all files list pattern = re.subn(r'[#]+', '*', ass_path)[0].replace('.ass.gz', '.ass*') all_cache_files = glob.glob(pattern) inner_caller = pdm.register(len(all_cache_files)) for source_f in all_cache_files: target_f = source_f.replace(source_driver, target_driver) # move files to new location shutil.move(source_f, target_f) inner_caller.step(message='Moving: %s' % source_f) inner_caller.end_progress() # finally update DSO path if isinstance(ass_node, pm.nt.AiStandIn): ass_node.dso.set(ass_path.replace(source_driver, target_driver)) elif isinstance(ass_node, pm.nt.AiVolume): ass_node.filename.set( ass_path.replace(source_driver, target_driver) ) caller.step() caller.end_progress()
OSError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/toolbox.py/Render.move_cache_files
5,714
@classmethod def create_alembic(cls, from_top_node=1): """creates alembic cache from selected nodes """ import os root_flag = '-root %(node)s' mel_command = 'AbcExport -j "-frameRange %(start)s %(end)s -ro ' \ '-stripNamespaces -uvWrite -wholeFrameGeo -worldSpace ' \ '%(roots)s ' \ '-file %(path)s";' current_path = pm.workspace.path abc_path = os.path.join(current_path, 'cache', 'alembic') try: os.makedirs(abc_path) except __HOLE__: pass abc_full_path = pm.fileDialog2(startingDirectory=abc_path) def find_top_parent(node): parents = node.listRelatives(p=1) parent = None while parents: parent = parents[0] parents = parent.listRelatives(p=1) if parents: parent = parents[0] else: return parent if not parent: return node else: return parent if abc_full_path: abc_full_path = abc_full_path[0] # this is dirty abc_full_path = os.path.splitext(abc_full_path)[0] + '.abc' # get nodes selection = pm.ls(sl=1) nodes = [] for node in selection: if from_top_node: node = find_top_parent(node) if node not in nodes: nodes.append(node) # generate root flags roots = [] for node in nodes: roots.append( root_flag % { 'node': node.fullPath() } ) roots_as_string = ' '.join(roots) start = int(pm.playbackOptions(q=1, minTime=1)) end = int(pm.playbackOptions(q=1, maxTime=1)) rendered_mel_command = mel_command % { 'start': start, 'end': end, 'roots': roots_as_string, 'path': abc_full_path } pm.mel.eval(rendered_mel_command)
OSError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/toolbox.py/Animation.create_alembic
5,715
@classmethod def copy_alembic_data(cls, source=None, target=None): """Copies alembic data from source to target hierarchy """ selection = pm.ls(sl=1) if not source or not target: source = selection[0] target = selection[1] # # Move Alembic Data From Source To Target # #selection = pm.ls(sl=1) # #source = selection[0] #target = selection[1] source_nodes = source.listRelatives( ad=1, type=(pm.nt.Mesh, pm.nt.NurbsSurface) ) target_nodes = target.listRelatives( ad=1, type=(pm.nt.Mesh, pm.nt.NurbsSurface) ) source_node_names = [] target_node_names = [] for node in source_nodes: name = node.name().split(':')[-1].split('|')[-1] source_node_names.append(name) for node in target_nodes: name = node.name().split(':')[-1].split('|')[-1] target_node_names.append(name) lut = [] for i, target_node in enumerate(target_nodes): target_node_name = target_node_names[i] try: index = source_node_names.index(target_node_name) except __HOLE__: pass else: lut.append((source_nodes[index], target_nodes[i])) for source_node, target_node in lut: if isinstance(source_node, pm.nt.Mesh): in_attr_name = 'inMesh' out_attr_name = 'outMesh' else: in_attr_name = 'create' out_attr_name = 'worldSpace' conns = source_node.attr(in_attr_name).inputs(p=1) if conns: for conn in conns: if isinstance(conn.node(), pm.nt.AlembicNode): conn >> target_node.attr(in_attr_name) break else: # no connection # just connect the shape itself source_node.attr(out_attr_name) >> \ target_node.attr(in_attr_name)
ValueError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/toolbox.py/Animation.copy_alembic_data
5,716
def _worker_start(): env = None policy = None max_length = None try: while True: msgs = {} # Only fetch the last message of each type while True: try: msg = queue.get_nowait() msgs[msg[0]] = msg[1:] except Empty: break if 'stop' in msgs: break elif 'update' in msgs: env, policy = msgs['update'] # env.start_viewer() elif 'demo' in msgs: param_values, max_length = msgs['demo'] policy.set_param_values(param_values) rollout(env, policy, max_path_length=max_length, animated=True, speedup=5) else: if max_length: rollout(env, policy, max_path_length=max_length, animated=True, speedup=5) except __HOLE__: pass
KeyboardInterrupt
dataset/ETHPy150Open rllab/rllab/rllab/plotter/plotter.py/_worker_start
5,717
def generate_gpu(self): """generates the GPU representation of the current scene """ # validate the version first self.version = self._validate_version(self.version) self.open_version(self.version) # load necessary plugins pm.loadPlugin('gpuCache') pm.loadPlugin('AbcExport') pm.loadPlugin('AbcImport') # check if all references have an GPU repr first refs_with_no_gpu_repr = [] for ref in pm.listReferences(): if ref.version and not ref.has_repr('GPU'): refs_with_no_gpu_repr.append(ref) if len(refs_with_no_gpu_repr): raise RuntimeError( 'Please generate the GPU Representation of the references ' 'first!!!\n%s' % '\n'.join(map(lambda x: str(x.path), refs_with_no_gpu_repr)) ) # unload all references for ref in pm.listReferences(): ref.unload() # for local models generate an ABC file output_path = os.path.join( self.version.absolute_path, 'Outputs/alembic/' ).replace('\\', '/') abc_command = \ 'AbcExport -j "-frameRange %(start_frame)s ' \ '%(end_frame)s ' \ '-ro -stripNamespaces ' \ '-uvWrite ' \ '-wholeFrameGeo ' \ '-worldSpace ' \ '-root |%(node)s -file %(file_path)s";' gpu_command = \ 'gpuCache -startTime %(start_frame)s ' \ '-endTime %(end_frame)s ' \ '-optimize -optimizationThreshold 40000 ' \ '-writeMaterials ' \ '-directory "%(path)s" ' \ '-fileName "%(filename)s" ' \ '%(node)s;' start_frame = end_frame = int(pm.currentTime(q=1)) if not self.is_scene_assembly_task(self.version.task): if self.is_vegetation_task(self.version.task): # in vegetation files, we export the GPU files directly from # the Base version, also we use the geometry under # "pfxPolygons" and parent the resulting Stand-In nodes to the # pfxPolygons # load all references for ref in pm.listReferences(): ref.load() # find the _pfxPolygons node pfx_polygons_node = pm.PyNode('kks___vegetation_pfxPolygons') for node in pfx_polygons_node.getChildren(): for child_node in node.getChildren(): child_node_name = child_node.name().split('___')[-1] child_node_shape = child_node.getShape() child_node_shape_name = None if child_node_shape: child_node_shape_name = child_node_shape.name() pm.select(child_node) temp_output_fullpath = \ tempfile.mktemp().replace('\\', '/') temp_output_path, temp_output_filename = \ os.path.split(temp_output_fullpath) output_filename = '%s_%s' % ( self.version.nice_name, child_node_name.split(':')[-1] .replace(':', '_') .replace('|', '_') ) # run the mel command # check if file exists pm.mel.eval( gpu_command % { 'start_frame': start_frame, 'end_frame': end_frame, 'node': child_node.fullPath(), 'path': temp_output_path, 'filename': temp_output_filename } ) cache_file_full_path = \ os.path\ .join(output_path, output_filename + '.abc')\ .replace('\\', '/') # create the intermediate directories try: os.makedirs( os.path.dirname(cache_file_full_path) ) except __HOLE__: # directory exists pass # now move in to its place shutil.move( temp_output_fullpath + '.abc', cache_file_full_path ) # set rotate and scale pivots rp = pm.xform(child_node, q=1, ws=1, rp=1) sp = pm.xform(child_node, q=1, ws=1, sp=1) #child_node.setRotatePivotTranslation([0, 0, 0]) # delete the child and add a GPU node instead pm.delete(child_node) # check if file exists and create nodes if os.path.exists(cache_file_full_path): gpu_node = pm.createNode('gpuCache') gpu_node_tra = gpu_node.getParent() pm.parent(gpu_node_tra, node) gpu_node_tra.rename(child_node_name) if child_node_shape_name is not None: gpu_node.rename(child_node_shape_name) pm.xform(gpu_node_tra, ws=1, rp=rp) pm.xform(gpu_node_tra, ws=1, sp=sp) gpu_node.setAttr( 'cacheFileName', 
cache_file_full_path, type="string" ) else: print('File not found!: %s' % cache_file_full_path) # clean up other nodes pm.delete('kks___vegetation_pfxStrokes') pm.delete('kks___vegetation_paintableGeos') else: root_nodes = self.get_local_root_nodes() if len(root_nodes): for root_node in root_nodes: # export each child of each root as separate nodes for child_node in root_node.getChildren(): # check if it is a transform node if not isinstance(child_node, pm.nt.Transform): continue if not auxiliary.has_shape(child_node): continue child_name = child_node.name() child_shape = child_node.getShape() child_shape_name = None if child_shape: child_shape_name = child_shape.name() child_full_path = \ child_node.fullPath()[1:].replace('|', '_') temp_output_fullpath = \ tempfile.mktemp().replace('\\', '/') temp_output_path, temp_output_filename = \ os.path.split(temp_output_fullpath) output_filename =\ '%s_%s' % ( self.version.nice_name, child_full_path ) # run the mel command # check if file exists pm.mel.eval( gpu_command % { 'start_frame': start_frame, 'end_frame': end_frame, 'node': child_node.fullPath(), 'path': temp_output_path, 'filename': temp_output_filename } ) cache_file_full_path = \ os.path\ .join( output_path, '%s.abc' % ( output_filename ) )\ .replace('\\', '/') # create the intermediate directories try: os.makedirs( os.path.dirname(cache_file_full_path) ) except OSError: # directory exists pass # now move in to its place shutil.move( temp_output_fullpath + '.abc', cache_file_full_path ) # set rotate and scale pivots rp = pm.xform(child_node, q=1, ws=1, rp=1) sp = pm.xform(child_node, q=1, ws=1, sp=1) # rpt = child_node.getRotatePivotTranslation() # delete the child and add a GPU node instead pm.delete(child_node) # check if file exists if os.path.exists(cache_file_full_path): gpu_node = pm.createNode('gpuCache') gpu_node_tra = gpu_node.getParent() pm.parent(gpu_node_tra, root_node) gpu_node_tra.rename(child_name) if child_shape_name is not None: gpu_node.rename(child_shape_name) pm.xform(gpu_node_tra, ws=1, rp=rp) pm.xform(gpu_node_tra, ws=1, sp=sp) # child_node.setRotatePivotTranslation(rpt) gpu_node.setAttr( 'cacheFileName', cache_file_full_path, type="string" ) # load all references again # convert all references to GPU logger.debug('converting all references to GPU') for ref in pm.listReferences(): # check if this is a Model reference ref.to_repr('GPU') ref.load() # if this is an Exterior/Interior -> Layout -> Hires task flatten it task = self.version.task is_exterior_or_interior_task = self.is_exterior_or_interior_task(task) if is_exterior_or_interior_task: logger.debug('importing all references') # and import all of the references all_refs = pm.listReferences() while len(all_refs) != 0: for ref in all_refs: if not ref.isLoaded(): ref.load() ref.importContents() all_refs = pm.listReferences() # assign lambert1 to all GPU nodes pm.sets('initialShadingGroup', e=1, fe=auxiliary.get_root_nodes()) # clean up self.clean_up() # 6. save the scene as {{original_take}}___GPU # use maya take_name = '%s%s%s' % ( self.base_take_name, Representation.repr_separator, 'GPU' ) v = self.get_latest_repr_version(take_name) self.maya_env.save_as(v) # export the root nodes under the same file if is_exterior_or_interior_task: logger.debug('exporting root nodes') pm.select(auxiliary.get_root_nodes()) pm.exportSelected( v.absolute_full_path, type='mayaAscii', force=True ) logger.debug('renewing scene') # clear scene pm.newFile(force=True)
OSError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/repr_tools.py/RepresentationGenerator.generate_gpu
5,718
@classmethod def clean_up(self): """cleans up the scene """ num_of_items_deleted = pm.mel.eval('MLdeleteUnused') logger.debug('deleting unknown references') delete_nodes_types = ['reference', 'unknown'] for node in pm.ls(type=delete_nodes_types): node.unlock() logger.debug('deleting "delete_nodes_types"') try: pm.delete(pm.ls(type=delete_nodes_types)) except __HOLE__: pass
RuntimeError
dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/repr_tools.py/RepresentationGenerator.clean_up
5,719
def current_items(self, obj): """Display a list of the current position content Each item will be wrapped in a admin change form link if the item is editable via the admin. This list also only shows the first 5 items in order not to clutter up the admin change list. A horizontal rule is placed between returned objects and overlap objects. """ items = [] position_contents = obj.positioncontent_set.all() for i, pobj in enumerate(position_contents[:5]): item = unicode(pobj.content_object) try: reverse_name = u'admin:{}_{}_change'.format( pobj.content_type.app_label, pobj.content_type.model) item_link = reverse(reverse_name, args=[pobj.content_object.id]) item = u'<a href="{}">{}</a>'.format( item_link, unicode(pobj.content_object)) except (NoReverseMatch, __HOLE__): pass items.append(u'<li>{}</li>'.format(item)) if i+1 == obj.count: items.append(u'<hr style="background-color:#a2a2a2;"/>') if position_contents.count() > 5: items.append(u'<br/> and {} more'.format( position_contents.count() - 5)) return u'<ul>{}</ul>'.format(''.join(items))
AttributeError
dataset/ETHPy150Open callowayproject/django-kamasutra/positions/admin.py/PositionAdmin.current_items
5,720
def suggest_special(text): text = text.lstrip() cmd, _, arg = parse_special_command(text) if cmd == text: # Trying to complete the special command itself return (Special(),) if cmd in ('\\c', '\\connect'): return (Database(),) if cmd == '\\dn': return (Schema(),) if arg: # Try to distinguish "\d name" from "\d schema.name" # Note that this will fail to obtain a schema name if wildcards are # used, e.g. "\d schema???.name" parsed = sqlparse.parse(arg)[0].tokens[0] try: schema = parsed.get_parent_name() except __HOLE__: schema = None else: schema = None if cmd[1:] == 'd': # \d can descibe tables or views if schema: return (Table(schema=schema), View(schema=schema),) else: return (Schema(), Table(schema=None), View(schema=None),) elif cmd[1:] in ('dt', 'dv', 'df', 'dT'): rel_type = {'dt': Table, 'dv': View, 'df': Function, 'dT': Datatype, }[cmd[1:]] if schema: return (rel_type(schema=schema),) else: return (Schema(), rel_type(schema=None)) if cmd in ['\\n', '\\ns', '\\nd']: return (NamedQuery(),) return (Keyword(), Special())
AttributeError
dataset/ETHPy150Open dbcli/pgcli/pgcli/packages/sqlcompletion.py/suggest_special
5,721
def test_textile(self): try: import textile except __HOLE__: textile = None textile_content = """Paragraph 1 Paragraph 2 with "quotes" and @code@""" t = Template("{{ textile_content|textile }}") rendered = t.render(Context(locals())).strip() if textile: self.assertEqual(rendered, """<p>Paragraph 1</p> <p>Paragraph 2 with &#8220;quotes&#8221; and <code>code</code></p>""") else: self.assertEqual(rendered, textile_content)
ImportError
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/lib/django/tests/regressiontests/markup/tests.py/Templates.test_textile
5,722
def test_markdown(self): try: import markdown except __HOLE__: markdown = None markdown_content = """Paragraph 1 ## An h2""" t = Template("{{ markdown_content|markdown }}") rendered = t.render(Context(locals())).strip() if markdown: pattern = re.compile("""<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>""") self.assert_(pattern.match(rendered)) else: self.assertEqual(rendered, markdown_content)
ImportError
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/lib/django/tests/regressiontests/markup/tests.py/Templates.test_markdown
5,723
def test_docutils(self): try: import docutils except __HOLE__: docutils = None rest_content = """Paragraph 1 Paragraph 2 with a link_ .. _link: http://www.example.com/""" t = Template("{{ rest_content|restructuredtext }}") rendered = t.render(Context(locals())).strip() if docutils: self.assertEqual(rendered, """<p>Paragraph 1</p> <p>Paragraph 2 with a <a class="reference" href="http://www.example.com/">link</a></p>""") else: self.assertEqual(rendered, rest_content)
ImportError
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/lib/django/tests/regressiontests/markup/tests.py/Templates.test_docutils
5,724
def subseq(self, start_offset=0, end_offset=None): """ Return a subset of the sequence starting at start_offset (defaulting to the beginning) ending at end_offset (None representing the end, whih is the default) Raises ValueError if duration_64 is missing on any element """ from sebastian.core import DURATION_64 def subseq_iter(start_offset, end_offset): cur_offset = 0 for point in self._elements: try: cur_offset += point[DURATION_64] except __HOLE__: raise ValueError("HSeq.subseq requires all points to have a %s attribute" % DURATION_64) #Skip until start if cur_offset < start_offset: continue #Yield points start_offset <= point < end_offset if end_offset is None or cur_offset < end_offset: yield point else: raise StopIteration return HSeq(subseq_iter(start_offset, end_offset))
KeyError
dataset/ETHPy150Open jtauber/sebastian/sebastian/core/elements.py/HSeq.subseq
5,725
def fetch_genes(id_list): """Fetch Entrez Gene records using Bio.Entrez, in particular epost (to submit the data to NCBI) and efetch to retrieve the information, then use Entrez.read to parse the data. Returns a list of parsed gene records. """ request = Entrez.epost("gene",id=",".join(id_list)) try: result = Entrez.read(request) except __HOLE__ as e: #FIXME: How generate NAs instead of causing an error with invalid IDs? print "An error occurred while retrieving the annotations." print "The error returned was %s" % e sys.exit(-1) webEnv = result["WebEnv"] queryKey = result["QueryKey"] efetch_result = Entrez.efetch(db="gene", webenv=webEnv, query_key = queryKey, retmode="xml") genes = Entrez.read(efetch_result) #print "Retrieved %d records for %d genes" % (len(genes),len(id_list)) return genes
RuntimeError
dataset/ETHPy150Open taoliu/taolib/Scripts/convert_gene_ids.py/fetch_genes
5,726
def parse_genes(genes): """Parse various gene information including: 1. Species name (taxonomy name) 2. Entrez gene ID 3. Official symbol 4. RefSeq IDs 5. Offical full name Basically, just to go through the parsed xml data.... A big headache to figure it out... Return a list of dictionary. """ gene_info_list = [] for gene_data in genes: gene_info = {} # get entrez ID try: gene_info["entrez_id"] = gene_data["Entrezgene_track-info"]["Gene-track"]["Gene-track_geneid"] except __HOLE__: gene_info["entrez_id"] = "" continue gene_info["refseq_ids"] = [] for comment in gene_data.get("Entrezgene_comments",[]): # look for refSeq annotation if comment.get("Gene-commentary_heading",None) == "NCBI Reference Sequences (RefSeq)": # get sub-comments for subcomment in comment.get("Gene-commentary_comment",[]): for product in subcomment.get("Gene-commentary_products",[]): if product.get("Gene-commentary_heading",None) == "mRNA Sequence": gene_info["refseq_ids"].append(product.get("Gene-commentary_accession","")) # get properties gene_info["official_symbol"] = "" # optional gene_info["official_full_name"] = "" # optional for gene_property in gene_data.get("Entrezgene_properties",[]): if gene_property.get("Gene-commentary_label",None) == "Nomenclature": for sub_property in gene_property["Gene-commentary_properties"]: if sub_property.get("Gene-commentary_label",None) == "Official Symbol": gene_info["official_symbol"] = sub_property.get("Gene-commentary_text","") if sub_property.get("Gene-commentary_label",None) == "Official Full Name": gene_info["official_full_name"] = sub_property.get("Gene-commentary_text","") # get taxname try: gene_info["taxname"] = gene_data["Entrezgene_source"]["BioSource"]["BioSource_org"]["Org-ref"]["Org-ref_taxname"] except KeyError: gene_info["taxname"] = "" continue gene_info_list.append(gene_info) return gene_info_list
KeyError
dataset/ETHPy150Open taoliu/taolib/Scripts/convert_gene_ids.py/parse_genes
5,727
def handle(self, *args, **options): raise CommandError( 'copy_group_data is currently broken. ' 'Ask Danny or Ethan to fix it along the lines of ' 'https://github.com/dimagi/commcare-hq/pull/9180/files#diff-9d976dc051a36a028c6604581dfbce5dR95' ) if len(args) != 2: raise CommandError('Usage is copy_group_data %s' % self.args) sourcedb = Database(args[0]) group_id = args[1] exclude_user_owned = options["exclude_user_owned"] print 'getting group' group = Group.wrap(sourcedb.get(group_id)) group.save(force_update=True) print 'getting domain' domain = Domain.wrap( sourcedb.view('domain/domains', key=group.domain, include_docs=True, reduce=False, limit=1).one()['doc'] ) dt = DocumentTransform(domain._obj, sourcedb) save(dt, Domain.get_db()) owners = [group_id] if not exclude_user_owned: owners.extend(group.users) print 'getting case ids' with OverrideDB(CommCareCase, sourcedb): case_ids = get_case_ids_in_domain_by_owner( domain.name, owner_id__in=owners) xform_ids = set() print 'copying %s cases' % len(case_ids) for i, subset in enumerate(chunked(case_ids, CHUNK_SIZE)): print i * CHUNK_SIZE cases = [CommCareCase.wrap(case['doc']) for case in sourcedb.all_docs( keys=list(subset), include_docs=True, )] for case in cases: xform_ids.update(case.xform_ids) self.lenient_bulk_save(CommCareCase, cases) if not exclude_user_owned: # also grab submissions that may not have included any case data for user_id in group.users: xform_ids.update(res['id'] for res in sourcedb.view( 'all_forms/view', startkey=['submission user', domain.name, user_id], endkey=['submission user', domain.name, user_id, {}], reduce=False )) print 'copying %s xforms' % len(xform_ids) user_ids = set(group.users) def form_wrapper(row): doc = row['doc'] doc.pop('_attachments', None) return XFormInstance.wrap(doc) for i, subset in enumerate(chunked(xform_ids, CHUNK_SIZE)): print i * CHUNK_SIZE xforms = sourcedb.all_docs( keys=list(subset), include_docs=True, wrapper=form_wrapper, ).all() self.lenient_bulk_save(XFormInstance, xforms) for xform in xforms: user_id = xform.metadata.userID user_ids.add(user_id) print 'copying %s users' % len(user_ids) def wrap_user(row): try: doc = row['doc'] except __HOLE__: logging.exception('trouble with user result %r' % row) return None try: return CouchUser.wrap_correctly(doc) except Exception: logging.exception('trouble with user %s' % doc['_id']) return None users = sourcedb.all_docs( keys=list(user_ids), include_docs=True, wrapper=wrap_user, ).all() role_ids = set([]) for user in filter(lambda u: u is not None, users): # if we use bulk save, django user doesn't get sync'd domain_membership = user.get_domain_membership(domain.name) if domain_membership and domain_membership.role_id: role_ids.add(user.domain_membership.role_id) user.save(force_update=True) print 'copying %s roles' % len(role_ids) for i, subset in enumerate(chunked(role_ids, CHUNK_SIZE)): roles = [UserRole.wrap(role['doc']) for role in sourcedb.all_docs( keys=list(subset), include_docs=True, )] self.lenient_bulk_save(UserRole, roles) if options['include_sync_logs']: print 'copying sync logs' for user_id in user_ids: log_ids = [res['id'] for res in sourcedb.view("phone/sync_logs_by_user", startkey=[user_id, {}], endkey=[user_id], descending=True, reduce=False, include_docs=True )] print 'user: %s, logs: %s' % (user_id, len(log_ids)) for i, subset in enumerate(chunked(log_ids, CHUNK_SIZE)): print i * CHUNK_SIZE logs = [SyncLog.wrap(log['doc']) for log in sourcedb.all_docs( keys=list(subset), include_docs=True, )] 
self.lenient_bulk_save(SyncLog, logs)
KeyError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/domainsync/management/commands/copy_group_data.py/Command.handle
5,728
def main(): try: si = None try: print "Trying to connect to VCENTER SERVER . . ." si = connect.Connect(inputs['vcenter_ip'], 443, inputs['vcenter_user'], inputs['vcenter_password'], version="vim.version.version8") except __HOLE__, e: pass atexit.register(Disconnect, si) print "Connected to VCENTER SERVER !" content = si.RetrieveContent() host = get_obj(content, [vim.HostSystem], inputs['host_name']) host_network_system = host.configManager.networkSystem # for pnic in host.config.network.pnic: # if pnic.device == inputs['nic_name']: # pnic_key = pnic.key create_vswitch(host_network_system, inputs['switch_name'], inputs['num_ports'], inputs['nic_name']) create_port_group(host_network_system, inputs['port_group_name'], inputs['switch_name']) #add_virtual_nic(host_network_system, inputs['port_group_name']) except vmodl.MethodFault, e: print "Caught vmodl fault: %s" % e.msg return 1 except Exception, e: print "Caught exception: %s" % str(e) return 1 # Start program
IOError
dataset/ETHPy150Open rreubenur/vmware-pyvmomi-examples/create_vswitch_and_portgroup.py/main
5,729
def _crosscat_metadata(self, bdb, generator_id): cc_cache = self._crosscat_cache(bdb) if cc_cache is not None and generator_id in cc_cache.metadata: return cc_cache.metadata[generator_id] sql = ''' SELECT metadata_json FROM bayesdb_crosscat_metadata WHERE generator_id = ? ''' cursor = bdb.sql_execute(sql, (generator_id,)) try: row = cursor.next() except __HOLE__: generator = core.bayesdb_generator_name(bdb, generator_id) raise BQLError(bdb, 'No crosscat metadata for generator: %s' % (generator,)) else: metadata = json.loads(row[0]) if cc_cache is not None: cc_cache.metadata[generator_id] = metadata return metadata
StopIteration
dataset/ETHPy150Open probcomp/bayeslite/src/metamodels/crosscat.py/CrosscatMetamodel._crosscat_metadata
5,730
def _crosscat_theta(self, bdb, generator_id, modelno): cc_cache = self._crosscat_cache(bdb) if cc_cache is not None and \ generator_id in cc_cache.thetas and \ modelno in cc_cache.thetas[generator_id]: return cc_cache.thetas[generator_id][modelno] sql = ''' SELECT theta_json FROM bayesdb_crosscat_theta WHERE generator_id = ? AND modelno = ? ''' cursor = bdb.sql_execute(sql, (generator_id, modelno)) try: row = cursor.next() except __HOLE__: generator = core.bayesdb_generator_name(bdb, generator_id) raise BQLError(bdb, 'No such crosscat model for generator %s: %d' % (repr(generator), modelno)) else: theta = json.loads(row[0]) if cc_cache is not None: if generator_id in cc_cache.thetas: assert modelno not in cc_cache.thetas[generator_id] cc_cache.thetas[generator_id][modelno] = theta else: cc_cache.thetas[generator_id] = {modelno: theta} return theta
StopIteration
dataset/ETHPy150Open probcomp/bayeslite/src/metamodels/crosscat.py/CrosscatMetamodel._crosscat_theta
5,731
def register(self, bdb): with bdb.savepoint(): schema_sql = 'SELECT version FROM bayesdb_metamodel WHERE name = ?' cursor = bdb.sql_execute(schema_sql, (self.name(),)) version = None try: row = cursor.next() except __HOLE__: version = 0 else: version = row[0] assert version is not None if version == 0: # XXX WHATTAKLUDGE! for stmt in crosscat_schema_1.split(';'): bdb.sql_execute(stmt) version = 1 if version == 1: # XXX WHATTAKLUDGE! for stmt in crosscat_schema_1to2.split(';'): bdb.sql_execute(stmt) # We never recorded diagnostics in the past, so we # can't fill the table in with historical data. But # we did create stub entries in the theta dicts which # serve no purpose now, so nuke them. sql = ''' SELECT generator_id, modelno, theta_json FROM bayesdb_crosscat_theta ''' update_sql = ''' UPDATE bayesdb_crosscat_theta SET theta_json = :theta_json WHERE generator_id = :generator_id AND modelno = :modelno ''' for generator_id, modelno, theta_json in bdb.sql_execute(sql): theta = json.loads(theta_json) if len(theta['logscore']) != 0 or \ len(theta['num_views']) != 0 or \ len(theta['column_crp_alpha']) != 0: raise IOError('Non-stub diagnostics!') del theta['logscore'] del theta['num_views'] del theta['column_crp_alpha'] self._theta_validator.validate(theta) theta_json = json.dumps(theta) bdb.sql_execute(update_sql, { 'generator_id': generator_id, 'modelno': modelno, 'theta_json': theta_json, }) version = 2 if version == 2: for stmt in crosscat_schema_2to3.split(';'): bdb.sql_execute(stmt) version = 3 if version == 3: cursor = bdb.sql_execute(''' SELECT generator_id FROM bayesdb_crosscat_metadata WHERE NOT EXISTS (SELECT * FROM bayesdb_crosscat_subsampled AS s WHERE s.generator_id = generator_id) ''') for (generator_id,) in cursor: bdb.sql_execute(''' INSERT INTO bayesdb_crosscat_subsampled (generator_id) VALUES (?) ''', (generator_id,)) table_name = core.bayesdb_generator_table(bdb, generator_id) qt = sqlite3_quote_name(table_name) bdb.sql_execute(''' INSERT INTO bayesdb_crosscat_subsample (generator_id, sql_rowid, cc_row_id) SELECT ?, _rowid_, _rowid_ - 1 FROM %s ''' % (qt,), (generator_id,)) for stmt in crosscat_schema_3to4.split(';'): bdb.sql_execute(stmt) version = 4 if version == 4: for stmt in crosscat_schema_4to5.split(';'): bdb.sql_execute(stmt) version = 5 if version == 5: for stmt in crosscat_schema_5to6.split(';'): bdb.sql_execute(stmt) version = 6 if version != 6: raise BQLError(bdb, 'Crosscat already installed' ' with unknown schema version: %d' % (version,))
StopIteration
dataset/ETHPy150Open probcomp/bayeslite/src/metamodels/crosscat.py/CrosscatMetamodel.register
5,732
def logpdf_joint(self, bdb, generator_id, targets, constraints, modelno=None): M_c = self._crosscat_metadata(bdb, generator_id) try: for _, colno, value in constraints: crosscat_value_to_code(bdb, generator_id, M_c, colno, value) except KeyError: # Probability with constraint that has no code return float('nan') try: for _, colno, value in targets: crosscat_value_to_code(bdb, generator_id, M_c, colno, value) except __HOLE__: # Probability of value that has no code return float('-inf') X_L_list = self._crosscat_latent_state(bdb, generator_id, modelno) X_D_list = self._crosscat_latent_data(bdb, generator_id, modelno) Q, Y, X_L_list, X_D_list = self._crosscat_remap_two( bdb, generator_id, X_L_list, X_D_list, targets, constraints) r = self._crosscat.predictive_probability_multistate( M_c=M_c, X_L_list=X_L_list, X_D_list=X_D_list, Y=Y, Q=Q, ) return r
KeyError
dataset/ETHPy150Open probcomp/bayeslite/src/metamodels/crosscat.py/CrosscatMetamodel.logpdf_joint
5,733
def crosscat_value_to_code(bdb, generator_id, M_c, colno, value): stattype = core.bayesdb_generator_column_stattype(bdb, generator_id, colno) if stattype == 'categorical': # For hysterical raisins, code_to_value and value_to_code are # backwards. # # XXX Fix this. if value is None: return float('NaN') # XXX !?!??! cc_colno = crosscat_cc_colno(bdb, generator_id, colno) key = unicode(value) code = M_c['column_metadata'][cc_colno]['code_to_value'][key] # XXX Crosscat expects floating-point codes. return float(code) elif stattype in ('cyclic', 'numerical'): # Data may be stored in the SQL table as strings, if imported # from wacky sources like CSV files, in which case both NULL # and non-numerical data -- including the string `nan' which # makes sense, and anything else which doesn't -- will be # represented by NaN. try: return float(value) except (ValueError, __HOLE__): return float('NaN') else: raise KeyError
TypeError
dataset/ETHPy150Open probcomp/bayeslite/src/metamodels/crosscat.py/crosscat_value_to_code
5,734
def crosscat_cc_colno(bdb, generator_id, colno): sql = ''' SELECT cc_colno FROM bayesdb_crosscat_column WHERE generator_id = ? AND colno = ? ''' cursor = bdb.sql_execute(sql, (generator_id, colno)) try: row = cursor.next() except __HOLE__: generator = core.bayesdb_generator_name(bdb, generator_id) colname = core.bayesdb_generator_column_name(bdb, generator_id, colno) raise BQLError(bdb, 'Column not modelled in generator %s: %s' % (repr(generator), repr(colname))) else: assert len(row) == 1 assert isinstance(row[0], int) return row[0]
StopIteration
dataset/ETHPy150Open probcomp/bayeslite/src/metamodels/crosscat.py/crosscat_cc_colno
5,735
def crosscat_gen_colno(bdb, generator_id, cc_colno): sql = ''' SELECT colno FROM bayesdb_crosscat_column WHERE generator_id = ? AND cc_colno = ? ''' cursor = bdb.sql_execute(sql, (generator_id, cc_colno)) try: row = cursor.next() except __HOLE__: generator = core.bayesdb_generator_name(bdb, generator_id) colname = core.bayesdb_generator_column_name(bdb, generator_id, cc_colno) raise BQLError(bdb, 'Column not Crosscat-modelled' ' in generator %s: %s' % (repr(generator), repr(colname))) else: assert len(row) == 1 assert isinstance(row[0], int) return row[0]
StopIteration
dataset/ETHPy150Open probcomp/bayeslite/src/metamodels/crosscat.py/crosscat_gen_colno
5,736
def _get_model(model_identifier): """ Helper to look up a model from an "app_label.module_name" string. """ try: Model = models.get_model(*model_identifier.split(".")) except __HOLE__: Model = None if Model is None: raise base.DeserializationError(u"Invalid model identifier: '%s'" % model_identifier) return Model
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/core/serializers/python.py/_get_model
5,737
def previous_current_next(items): """ From http://www.wordaligned.org/articles/zippy-triples-served-with-python Creates an iterator which returns (previous, current, next) triples, with ``None`` filling in when there is no previous or next available. """ extend = itertools.chain([None], items, [None]) previous, current, next = itertools.tee(extend, 3) try: current.next() next.next() next.next() except __HOLE__: pass return itertools.izip(previous, current, next)
StopIteration
dataset/ETHPy150Open agiliq/django-socialnews/socialnews/mptt/utils.py/previous_current_next
5,738
def lastfm_get_tree(method, **kwargs): for k, v in kwargs.items(): kwargs[k] = unicode(v).encode('utf-8') url = 'http://ws.audioscrobbler.com/2.0/?api_key=%s&method=%s&%s' % ( settings.LASTFM_API_KEY, method, urllib.urlencode(kwargs) ) print url try: tree = etree.parse(url) return tree except __HOLE__: print "Did not work: "+url return None
IOError
dataset/ETHPy150Open jpic/playlistnow.fm/apps/music/lastfm_api.py/lastfm_get_tree
5,739
@classmethod def utcfromtimestamp(cls, timestamp, allow_future=False): """Converts a str or int epoch time to datetime. Note: this method drops ms from timestamps. Args: timestamp: str, int, or float epoch timestamp. allow_future: boolean, default False, True to allow future timestamps. Returns: datetime representation of the timestamp. Raises: ValueError: timestamp is invalid. EpochValueError: the timestamp is valid, but unacceptable. EpochFutureValueError: timestamp under an hour in future. EpochExtremeFutureValueError: timestamp over an hour in future. """ try: timestamp = int(float(timestamp)) dt = datetime.datetime.utcfromtimestamp(timestamp) except (TypeError, __HOLE__): raise ValueError( 'timestamp is None, empty, or otherwise invalid: %s' % timestamp) now = datetime.datetime.utcnow() if not allow_future and dt > now: msg = 'datetime in the future: %s' % dt if dt > (now + datetime.timedelta(minutes=66)): # raise a slightly different exception for > 66mins to allow for more # verbose logging. raise EpochExtremeFutureValueError(msg) raise EpochFutureValueError(msg) return dt
ValueError
dataset/ETHPy150Open google/simian/src/simian/mac/common/util.py/Datetime.utcfromtimestamp
5,740
def Serialize(obj): """Return a binary serialized version of object. Depending on the serialization method, some complex objects or input formats may not be serializable. UTF-8 strings (by themselves or in other structures e.g. lists) are always supported. Args: obj: any object Returns: str, possibly containing ascii values >127 Raises: SerializeError: if an error occured during serialization """ try: return json.dumps(obj) except __HOLE__ as e: raise SerializeError(e)
TypeError
dataset/ETHPy150Open google/simian/src/simian/mac/common/util.py/Serialize
5,741
def Deserialize(s, parse_float=float): """Return an object for a binary serialized version. Depending on the target platform, precision of float values may be lowered on deserialization. Use parse_float to provide an alternative floating point translation function, e.g. decimal.Decimal, if retaining high levels of float precision (> ~10 places) is important. Args: s: str parse_float: callable, optional, to translate floating point values Returns: any object that was serialized Raises: DeserializeError: if an error occured during deserialization """ try: if s is None: raise DeserializeError('Nothing to deserialize: %s' % type(s)) return json.loads(s, parse_float=parse_float) except __HOLE__ as e: raise DeserializeError(e)
ValueError
dataset/ETHPy150Open google/simian/src/simian/mac/common/util.py/Deserialize
5,742
@classmethod def get_row_by_pk(self, ar, pk): """ `dbtables.Table` overrides this. """ try: return ar.data_iterator[int(pk)-1] except (__HOLE__, IndexError): return None
ValueError
dataset/ETHPy150Open lsaffre/lino/lino/core/tables.py/AbstractTable.get_row_by_pk
5,743
def parse_str(string): try: return float(string) except __HOLE__: prefixes = dict(prefixes_SI.items() + prefixes_IEC.items()) for prefix, val in prefixes.items(): if string.endswith(prefix): return float(string.replace(prefix, '')) * val raise Exception("I didn't understand '%s'" % string)
ValueError
dataset/ETHPy150Open vimeo/graph-explorer/graph_explorer/convert.py/parse_str
5,744
def __new__(cls, config=None, **kwargs): """Immutable constructor. If 'config' is non-None all configuration options will default to the value it contains unless the configuration option is explicitly set to 'None' in the keyword arguments. If 'config' is None then all configuration options default to None. Args: config: Optional base configuration providing default values for parameters not specified in the keyword arguments. **kwargs: Configuration options to store on this object. Returns: Either a new Configuration object or (if it would be equivalent) the config argument unchanged, but never None. """ if config is None: pass elif isinstance(config, BaseConfiguration): if cls is config.__class__ and config.__is_stronger(**kwargs): return config for key, value in config._values.iteritems(): if issubclass(cls, config._options[key]._cls): kwargs.setdefault(key, value) else: raise datastore_errors.BadArgumentError( 'config argument should be Configuration (%r)' % (config,)) obj = super(BaseConfiguration, cls).__new__(cls) obj._values = {} for key, value in kwargs.iteritems(): if value is not None: try: config_option = obj._options[key] except __HOLE__, err: raise TypeError('Unknown configuration option (%s)' % err) value = config_option.validator(value) if value is not None: obj._values[key] = value return obj
KeyError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/datastore/datastore_rpc.py/BaseConfiguration.__new__
5,745
def _remove_pending(self, rpc): """Remove an RPC object from the list of pending RPCs. If the argument is a MultiRpc object, the wrapped RPCs are removed from the list of pending RPCs. """ if isinstance(rpc, MultiRpc): for wrapped_rpc in rpc._MultiRpc__rpcs: self._remove_pending(wrapped_rpc) else: try: self.__pending_rpcs.remove(rpc) except __HOLE__: pass
KeyError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/datastore/datastore_rpc.py/BaseConnection._remove_pending
5,746
def execute_request(self, request): try: # This is how the carbon graphite server parses the line. We could be more forgiving but if it works # for them, then we can do it as well. metric, value, orig_timestamp = request.strip().split() value = float(value) orig_timestamp = float(orig_timestamp) # Include the time that the original graphite request said to associate with the metric value. self.__logger.emit_value(metric, value, extra_fields={'orig_time': orig_timestamp}) except __HOLE__: self.__logger.warn('Could not parse incoming metric line from graphite plaintext server, ignoring', error_code='graphite_monitor/badPlainTextLine')
ValueError
dataset/ETHPy150Open scalyr/scalyr-agent-2/scalyr_agent/builtin_monitors/graphite_monitor.py/GraphiteTextServer.execute_request
5,747
def execute_request(self, request): # noinspection PyBroadException try: # Use pickle to read the binary data. data_object = pickle.loads(request) except: # pickle.loads is document as raising any type of exception, so have to catch them all. self.__logger.warn('Could not parse incoming metric line from graphite pickle server, ignoring', error_code='graphite_monitor/badUnpickle') return try: # The format should be [[ metric [ timestamp, value]] ... ] for (metric, datapoint) in data_object: value = float(datapoint[1]) orig_timestamp = float(datapoint[0]) self.__logger.emit_value(metric, value, extra_fields={'orig_time': orig_timestamp}) except __HOLE__: self.__logger.warn('Could not parse incoming metric line from graphite pickle server, ignoring', error_code='graphite_monitor/badPickleLine')
ValueError
dataset/ETHPy150Open scalyr/scalyr-agent-2/scalyr_agent/builtin_monitors/graphite_monitor.py/GraphitePickleServer.execute_request
5,748
def get_proxy(self): """ cycly generate the proxy server string """ try: proxy = next(self.it_proxy) LOG.debug('fetched proxy : %s' % proxy) return proxy except __HOLE__: LOG.debug('.... re-iterating proxies') self.it_proxy = iter(self.proxies) proxy = next(self.it_proxy) LOG.debug('fetched proxy : %s' % proxy) return proxy
StopIteration
dataset/ETHPy150Open sk1418/zhuaxia/zhuaxia/proxypool.py/ProxyPool.get_proxy
5,749
def run_single_instance(self, args): # code for single instance of the application # based on the C++ solution available at # http://wiki.qtcentre.org/index.php?title=SingleApplication if QtCore.QT_VERSION >= 0x40400: self._unique_key = os.path.join(system.home_directory(), "vistrails-single-instance-check-%s"%getpass.getuser()) self.shared_memory = QtCore.QSharedMemory(self._unique_key) self.local_server = None if self.shared_memory.attach(): self._is_running = True local_socket = QtNetwork.QLocalSocket(self) local_socket.connectToServer(self._unique_key) if not local_socket.waitForConnected(self.timeout): debug.critical( "Connection failed: %s\n" "Removing socket" % (local_socket.errorString())) try: os.remove(self._unique_key) except __HOLE__, e: debug.critical("Couldn't remove socket: %s" % self._unique_key, e) else: if self.found_another_instance_running(local_socket, args): return APP_DONE # success, we should shut down else: # This is bad, but not fatal. Let's keep going... debug.critical("Failed to communicate with existing " "instance") return if not self.shared_memory.create(1): debug.critical("Unable to create single instance " "of vistrails application") return self.local_server = QtNetwork.QLocalServer(self) self.connect(self.local_server, QtCore.SIGNAL("newConnection()"), self.message_received) if self.local_server.listen(self._unique_key): debug.log("Listening on %s"%self.local_server.fullServerName()) else: # This usually happens when vistrails have crashed # Delete the key and try again self.shared_memory.detach() self.local_server.close() if os.path.exists(self._unique_key): os.remove(self._unique_key) self.shared_memory = QtCore.QSharedMemory(self._unique_key) self.local_server = None if not self.shared_memory.create(1): debug.critical("Unable to create single instance " "of vistrails application") return self.local_server = QtNetwork.QLocalServer(self) self.connect(self.local_server, QtCore.SIGNAL("newConnection()"), self.message_received) if self.local_server.listen(self._unique_key): debug.log("Listening on %s"%self.local_server.fullServerName()) else: debug.warning( "Server is not listening. This means it " "will not accept parameters from other " "instances") return None
OSError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/gui/application.py/VistrailsApplicationSingleton.run_single_instance
5,750
def parse_input_args_from_other_instance(self, msg): options_re = re.compile(r"^(\[('([^'])*', ?)*'([^']*)'\])|(\[\s?\])$") if options_re.match(msg): #it's safe to eval as a list args = literal_eval(msg) if isinstance(args, list): try: conf_options = self.read_options(args) except __HOLE__: debug.critical("Invalid options: %s" % ' '.join(args)) return False try: # Execute using persistent configuration + new temp configuration old_temp_conf = self.temp_configuration self.startup.temp_configuration = copy.copy(self.configuration) self.temp_configuration.update(conf_options) interactive = not self.temp_configuration.check('batch') if interactive: result = self.process_interactive_input() if self.temp_configuration.showWindow: # in some systems (Linux and Tiger) we need to make both calls # so builderWindow is activated self.builderWindow.raise_() self.builderWindow.activateWindow() return result else: return self.noninteractiveMode() finally: self.startup.temp_configuration = old_temp_conf else: debug.critical("Invalid string: %s" % msg) else: debug.critical("Invalid input: %s" % msg) return False
SystemExit
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/gui/application.py/VistrailsApplicationSingleton.parse_input_args_from_other_instance
5,751
def linux_default_application_set(): """linux_default_application_set() -> True|False|None For Linux - checks if a handler is set for .vt and .vtl files. """ command = ['xdg-mime', 'query', 'filetype', os.path.join(system.vistrails_root_directory(), 'tests', 'resources', 'terminator.vt')] try: output = [] result = system.execute_cmdline(command, output) if result != 0: # something is wrong, abort debug.warning("Error checking mimetypes: %s" % output[0]) return None except __HOLE__, e: debug.warning("Error checking mimetypes: %s" % e.message) return None if 'application/x-vistrails' == output[0].strip(): return True return False
OSError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/gui/application.py/linux_default_application_set
5,752
def linux_update_default_application(): """ update_default_application() -> None For Linux - checks if we should install vistrails as the default application for .vt and .vtl files. If replace is False, don't replace an existing handler. Returns True if installation succeeded. """ root = system.vistrails_root_directory() home = os.path.expanduser('~') # install mime type command = ['xdg-mime', 'install', os.path.join(system.vistrails_root_directory(), 'gui/resources/vistrails-mime.xml')] output = [] try: result = system.execute_cmdline(command, output) except OSError: result = None if result != 0: debug.warning("Error running xdg-mime") return False command = ['update-mime-database', home + '/.local/share/mime'] output = [] try: result = system.execute_cmdline(command, output) except __HOLE__: result = None if result != 0: debug.warning("Error running update-mime-database") return False # install icon command = ['xdg-icon-resource', 'install', '--context', 'mimetypes', '--size', '48', os.path.join(system.vistrails_root_directory(), 'gui/resources/images/vistrails_icon_small.png'), 'application-x-vistrails'] output = [] try: result = system.execute_cmdline(command, output) except OSError: result = None if result != 0: debug.warning("Error running xdg-icon-resource") return True # the handler is set anyway # install desktop file dirs = [home + '/.local', home + '/.local/share', home + '/.local/share/applications'] for d in dirs: if not os.path.isdir(d): os.mkdir(d) desktop = """[Desktop Entry] Name=VisTrails Exec=python {root}/run.py %f Icon={root}/gui/resources/images/vistrails_icon_small.png Type=Application MimeType=application/x-vistrails """.format(root=root) f = open(os.path.join(dirs[2], 'vistrails.desktop'), 'w') f.write(desktop) f.close() command = ['update-desktop-database', dirs[2]] output = [] try: result = system.execute_cmdline(command, output) except OSError: result = None if result != 0: debug.warning("Error running update-desktop-database") return True # The initialization must be explicitly signalled. Otherwise, any # modules importing vis_application will try to initialize the entire # app.
OSError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/gui/application.py/linux_update_default_application
5,753
def _show_completions(self, view): try: # TODO: We probably should show completions after other chars. is_after_dot = view.substr(view.sel()[0].b - 1) == '.' except __HOLE__: return if not is_after_dot: return if not AnalysisServer.ping(): return # First, send new content if any. if view.is_dirty() and is_active(view): _logger.debug('sending overlay data for %s', view.file_name()) analyzer.g_server.send_add_content(view) if is_active(view): view.window().run_command('dart_get_completions')
IndexError
dataset/ETHPy150Open guillermooo/dart-sublime-bundle/autocomplete.py/DartIdleAutocomplete._show_completions
5,754
def _in_string_or_comment(self, view): try: return view.match_selector(view.sel()[0].b, 'source.dart string, source.dart comment') except __HOLE__: pass
IndexError
dataset/ETHPy150Open guillermooo/dart-sublime-bundle/autocomplete.py/DartIdleAutocomplete._in_string_or_comment
5,755
def __getitem__(self, key): try: processor, obj, index = self._keymap[key] except __HOLE__: processor, obj, index = self._parent._key_fallback(key) except TypeError: if isinstance(key, slice): l = [] for processor, value in zip(self._processors[key], self._row[key]): if processor is None: l.append(value) else: l.append(processor(value)) return tuple(l) else: raise if index is None: raise exc.InvalidRequestError( "Ambiguous column name '%s' in result set! " "try 'use_labels' option on select statement." % key) if processor is not None: return processor(self._row[index]) else: return self._row[index]
KeyError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/engine/result.py/BaseRowProxy.__getitem__
5,756
def __getattr__(self, name): try: return self[name] except __HOLE__ as e: raise AttributeError(e.args[0])
KeyError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/engine/result.py/BaseRowProxy.__getattr__
5,757
def __init__(self, parent, metadata): self._processors = processors = [] # We do not strictly need to store the processor in the key mapping, # though it is faster in the Python version (probably because of the # saved attribute lookup self._processors) self._keymap = keymap = {} self.keys = [] context = parent.context dialect = context.dialect typemap = dialect.dbapi_type_map translate_colname = context._translate_colname self.case_sensitive = dialect.case_sensitive # high precedence key values. primary_keymap = {} for i, rec in enumerate(metadata): colname = rec[0] coltype = rec[1] if dialect.description_encoding: colname = dialect._description_decoder(colname) if translate_colname: colname, untranslated = translate_colname(colname) if dialect.requires_name_normalize: colname = dialect.normalize_name(colname) if context.result_map: try: name, obj, type_ = context.result_map[ colname if self.case_sensitive else colname.lower()] except __HOLE__: name, obj, type_ = \ colname, None, typemap.get(coltype, sqltypes.NULLTYPE) else: name, obj, type_ = \ colname, None, typemap.get(coltype, sqltypes.NULLTYPE) processor = context.get_result_processor(type_, colname, coltype) processors.append(processor) rec = (processor, obj, i) # indexes as keys. This is only needed for the Python version of # RowProxy (the C version uses a faster path for integer indexes). primary_keymap[i] = rec # populate primary keymap, looking for conflicts. if primary_keymap.setdefault( name if self.case_sensitive else name.lower(), rec) is not rec: # place a record that doesn't have the "index" - this # is interpreted later as an AmbiguousColumnError, # but only when actually accessed. Columns # colliding by name is not a problem if those names # aren't used; integer access is always # unambiguous. primary_keymap[name if self.case_sensitive else name.lower()] = rec = (None, obj, None) self.keys.append(colname) if obj: for o in obj: keymap[o] = rec # technically we should be doing this but we # are saving on callcounts by not doing so. # if keymap.setdefault(o, rec) is not rec: # keymap[o] = (None, obj, None) if translate_colname and \ untranslated: keymap[untranslated] = rec # overwrite keymap values with those of the # high precedence keymap. keymap.update(primary_keymap) if parent._echo: context.engine.logger.debug( "Col %r", tuple(x[0] for x in metadata))
KeyError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/engine/result.py/ResultMetaData.__init__
5,758
def _fetchone_impl(self): try: return self.cursor.fetchone() except __HOLE__: self._non_result()
AttributeError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/engine/result.py/ResultProxy._fetchone_impl
5,759
def _fetchmany_impl(self, size=None): try: if size is None: return self.cursor.fetchmany() else: return self.cursor.fetchmany(size) except __HOLE__: self._non_result()
AttributeError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/engine/result.py/ResultProxy._fetchmany_impl
5,760
def _fetchall_impl(self): try: return self.cursor.fetchall() except __HOLE__: self._non_result()
AttributeError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/engine/result.py/ResultProxy._fetchall_impl
5,761
def play_sound_file(self, data, rate, ssize, nchannels): try: dsp = ossaudiodev.open('w') except __HOLE__, msg: if msg.args[0] in (errno.EACCES, errno.ENOENT, errno.ENODEV, errno.EBUSY): raise unittest.SkipTest(msg) raise # at least check that these methods can be invoked dsp.bufsize() dsp.obufcount() dsp.obuffree() dsp.getptr() dsp.fileno() # Make sure the read-only attributes work. self.assertFalse(dsp.closed) self.assertEqual(dsp.name, "/dev/dsp") self.assertEqual(dsp.mode, "w", "bad dsp.mode: %r" % dsp.mode) # And make sure they're really read-only. for attr in ('closed', 'name', 'mode'): try: setattr(dsp, attr, 42) except TypeError: pass else: self.fail("dsp.%s not read-only" % attr) # Compute expected running time of sound sample (in seconds). expected_time = float(len(data)) / (ssize//8) / nchannels / rate # set parameters based on .au file headers dsp.setparameters(AFMT_S16_NE, nchannels, rate) self.assertTrue(abs(expected_time - 3.51) < 1e-2, expected_time) t1 = time.time() dsp.write(data) dsp.close() t2 = time.time() elapsed_time = t2 - t1 percent_diff = (abs(elapsed_time - expected_time) / expected_time) * 100 self.assertTrue(percent_diff <= 10.0, "elapsed time > 10% off of expected time")
IOError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_ossaudiodev.py/OSSAudioDevTests.play_sound_file
5,762
def test_main(): try: dsp = ossaudiodev.open('w') except (ossaudiodev.error, __HOLE__), msg: if msg.args[0] in (errno.EACCES, errno.ENOENT, errno.ENODEV, errno.EBUSY): raise unittest.SkipTest(msg) raise dsp.close() test_support.run_unittest(__name__)
IOError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_ossaudiodev.py/test_main
5,763
def getconfig(func): if func.__name__.startswith('help') or func.__name__ in ('version',): return func def inner(*args, **kwargs): try: repo = Repository(kwargs['config'], kwargs['define']) except NomadIniNotFound as e: sys.stderr.write("Create '%s' to use nomad, example:\n%s\n" % (e, EXAMPLE_INI)) abort('config file not found') except (__HOLE__, NomadError) as e: abort(e) return func(repo=repo, *args, **kwargs) return inner
IOError
dataset/ETHPy150Open piranha/nomad/nomad/__init__.py/getconfig
5,764
@app.command() def create(name, dependencies=('d', [], 'migration dependencies'), prefix_date=('p', False, 'prefix migration name with date'), **opts): '''Create new migration ''' repo = opts['repo'] deps = map(repo.get, dependencies) if prefix_date: name = date.today().strftime('%Y%m%d-') + name path = op.join(repo.path, name) try: os.mkdir(path) except __HOLE__ as e: if e.errno == 17: abort('directory %s already exists' % path) raise with open(op.join(path, 'migration.ini'), 'w') as f: f.write('[nomad]\n') f.write('dependencies = %s\n' % ', '.join(d.name for d in deps)) with open(op.join(path, 'up.sql'), 'w') as f: f.write('-- SQL ALTER statements for database migration\n')
OSError
dataset/ETHPy150Open piranha/nomad/nomad/__init__.py/create
5,765
def _state_session(state): """Given an :class:`.InstanceState`, return the :class:`.Session` associated, if any. """ if state.session_id: try: return _sessions[state.session_id] except __HOLE__: pass return None
KeyError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/orm/session.py/_state_session
5,766
def main(argv=sys.argv[1:]): myapp = MusubiApp() if len(argv): try: return myapp.run(argv) except __HOLE__, err: #Command does not exist MusubiApp.log.error(err) sys.exit(2) except KeyboardInterrupt, err: MusubiApp.log.error(err) pass else: return myapp.run(['-h'])
ValueError
dataset/ETHPy150Open cakebread/musubi/musubi/main.py/main
5,767
def fit_regularized(self, start_params=None, method='l1', maxiter='defined_by_method', full_output=1, disp=True, callback=None, alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03, qc_verbose=False, **kwargs): """ Fit the model using a regularized maximum likelihood. The regularization method AND the solver used is determined by the argument method. Parameters ---------- start_params : array-like, optional Initial guess of the solution for the loglikelihood maximization. The default is an array of zeros. method : 'l1' or 'l1_cvxopt_cp' See notes for details. maxiter : Integer or 'defined_by_method' Maximum number of iterations to perform. If 'defined_by_method', then use method defaults (see notes). full_output : bool Set to True to have all available output in the Results object's mle_retvals attribute. The output is dependent on the solver. See LikelihoodModelResults notes section for more information. disp : bool Set to True to print convergence messages. fargs : tuple Extra arguments passed to the likelihood function, i.e., loglike(x,*args) callback : callable callback(xk) Called after each iteration, as callback(xk), where xk is the current parameter vector. retall : bool Set to True to return list of solutions at each iteration. Available in Results object's mle_retvals attribute. alpha : non-negative scalar or numpy array (same size as parameters) The weight multiplying the l1 penalty term trim_mode : 'auto, 'size', or 'off' If not 'off', trim (set to zero) parameters that would have been zero if the solver reached the theoretical minimum. If 'auto', trim params using the Theory above. If 'size', trim params if they have very small absolute value size_trim_tol : float or 'auto' (default = 'auto') For use when trim_mode == 'size' auto_trim_tol : float For sue when trim_mode == 'auto'. Use qc_tol : float Print warning and don't allow auto trim when (ii) (above) is violated by this much. qc_verbose : Boolean If true, print out a full QC report upon failure Notes ----- Extra parameters are not penalized if alpha is given as a scalar. An example is the shape parameter in NegativeBinomial `nb1` and `nb2`. Optional arguments for the solvers (available in Results.mle_settings):: 'l1' acc : float (default 1e-6) Requested accuracy as used by slsqp 'l1_cvxopt_cp' abstol : float absolute accuracy (default: 1e-7). reltol : float relative accuracy (default: 1e-6). feastol : float tolerance for feasibility conditions (default: 1e-7). refinement : int number of iterative refinement steps when solving KKT equations (default: 1). Optimization methodology With :math:`L` the negative log likelihood, we solve the convex but non-smooth problem .. math:: \\min_\\beta L(\\beta) + \\sum_k\\alpha_k |\\beta_k| via the transformation to the smooth, convex, constrained problem in twice as many variables (adding the "added variables" :math:`u_k`) .. math:: \\min_{\\beta,u} L(\\beta) + \\sum_k\\alpha_k u_k, subject to .. math:: -u_k \\leq \\beta_k \\leq u_k. 
With :math:`\\partial_k L` the derivative of :math:`L` in the :math:`k^{th}` parameter direction, theory dictates that, at the minimum, exactly one of two conditions holds: (i) :math:`|\\partial_k L| = \\alpha_k` and :math:`\\beta_k \\neq 0` (ii) :math:`|\\partial_k L| \\leq \\alpha_k` and :math:`\\beta_k = 0` """ ### Set attributes based on method if method in ['l1', 'l1_cvxopt_cp']: cov_params_func = self.cov_params_func_l1 else: raise Exception("argument method == %s, which is not handled" % method) ### Bundle up extra kwargs for the dictionary kwargs. These are ### passed through super(...).fit() as kwargs and unpacked at ### appropriate times alpha = np.array(alpha) assert alpha.min() >= 0 try: kwargs['alpha'] = alpha except __HOLE__: kwargs = dict(alpha=alpha) kwargs['alpha_rescaled'] = kwargs['alpha'] / float(self.endog.shape[0]) kwargs['trim_mode'] = trim_mode kwargs['size_trim_tol'] = size_trim_tol kwargs['auto_trim_tol'] = auto_trim_tol kwargs['qc_tol'] = qc_tol kwargs['qc_verbose'] = qc_verbose ### Define default keyword arguments to be passed to super(...).fit() if maxiter == 'defined_by_method': if method == 'l1': maxiter = 1000 elif method == 'l1_cvxopt_cp': maxiter = 70 ## Parameters to pass to super(...).fit() # For the 'extra' parameters, pass all that are available, # even if we know (at this point) we will only use one. extra_fit_funcs = {'l1': fit_l1_slsqp} if have_cvxopt and method == 'l1_cvxopt_cp': from statsmodels.base.l1_cvxopt import fit_l1_cvxopt_cp extra_fit_funcs['l1_cvxopt_cp'] = fit_l1_cvxopt_cp elif method.lower() == 'l1_cvxopt_cp': message = ("Attempt to use l1_cvxopt_cp failed since cvxopt " "could not be imported") if callback is None: callback = self._check_perfect_pred else: pass # make a function factory to have multiple call-backs mlefit = super(DiscreteModel, self).fit(start_params=start_params, method=method, maxiter=maxiter, full_output=full_output, disp=disp, callback=callback, extra_fit_funcs=extra_fit_funcs, cov_params_func=cov_params_func, **kwargs) return mlefit # up to subclasses to wrap results
TypeError
dataset/ETHPy150Open statsmodels/statsmodels/statsmodels/discrete/discrete_model.py/DiscreteModel.fit_regularized
5,768
def _handle_data(self, endog, exog, missing, hasconst, **kwargs): if data_tools._is_using_ndarray_type(endog, None): endog_dummies, ynames = _numpy_to_dummies(endog) yname = 'y' elif data_tools._is_using_pandas(endog, None): endog_dummies, ynames, yname = _pandas_to_dummies(endog) else: endog = np.asarray(endog) endog_dummies, ynames = _numpy_to_dummies(endog) yname = 'y' if not isinstance(ynames, dict): ynames = dict(zip(range(endog_dummies.shape[1]), ynames)) self._ynames_map = ynames data = handle_data(endog_dummies, exog, missing, hasconst, **kwargs) data.ynames = yname # overwrite this to single endog name data.orig_endog = endog self.wendog = data.endog # repeating from upstream... for key in kwargs: try: setattr(self, key, data.__dict__.pop(key)) except __HOLE__: pass return data
KeyError
dataset/ETHPy150Open statsmodels/statsmodels/statsmodels/discrete/discrete_model.py/MultinomialModel._handle_data
5,769
def __getstate__(self): try: #remove unpicklable callback self.mle_settings['callback'] = None except (__HOLE__, KeyError): pass return self.__dict__
AttributeError
dataset/ETHPy150Open statsmodels/statsmodels/statsmodels/discrete/discrete_model.py/DiscreteResults.__getstate__
5,770
def _maybe_convert_ynames_int(self, ynames): # see if they're integers try: for i in ynames: if ynames[i] % 1 == 0: ynames[i] = str(int(ynames[i])) except __HOLE__: pass return ynames
TypeError
dataset/ETHPy150Open statsmodels/statsmodels/statsmodels/discrete/discrete_model.py/MultinomialResults._maybe_convert_ynames_int
5,771
def __init__(self, r_or_colorstr, g=None, b=None, a=None): if isinstance(r_or_colorstr, str): assert g is b is a is None, "Ambiguous color arguments" self.r, self.g, self.b, self.a = self._parse_colorstr(r_or_colorstr) elif g is b is a is None: try: self.r, self.g, self.b, self.a = r_or_colorstr except __HOLE__: self.r, self.g, self.b = r_or_colorstr self.a = 1.0 else: self.r = r_or_colorstr self.g = g self.b = b self.a = a if self.a is None: self.a = 1.0
ValueError
dataset/ETHPy150Open caseman/grease/grease/color.py/RGBA.__init__
5,772
def build_pot_file(self, localedir): file_list = self.find_files(".") potfile = os.path.join(localedir, '%s.pot' % str(self.domain)) if os.path.exists(potfile): # Remove a previous undeleted potfile, if any os.unlink(potfile) for f in file_list: try: f.process(self, potfile, self.domain, self.keep_pot) except __HOLE__: self.stdout.write("UnicodeDecodeError: skipped file %s in %s" % (f.file, f.dirpath)) return potfile
UnicodeDecodeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/core/management/commands/makemessages.py/Command.build_pot_file
5,773
def it_should_be_iterable(self): srels = _SerializedRelationshipCollection() try: for x in srels: pass except __HOLE__: msg = "_SerializedRelationshipCollection object is not iterable" pytest.fail(msg) # fixtures -------------------------------------------------------
TypeError
dataset/ETHPy150Open scanny/python-pptx/tests/opc/test_pkgreader.py/Describe_SerializedRelationshipCollection.it_should_be_iterable
5,774
def load_raw_results(self): self.experiments = [] for i in range(len(self.folders)): filename = self.folders[i] + Experiment.experiment_output_folder + \ os.sep + Experiment.erperiment_output_filename try: f = open(filename , "r") self.experiments.append(load(f)) f.close() except __HOLE__: print "skipping", filename, "due to IOError" errorfilename=self.folders[i] + ClusterTools.cluster_error_filename try: ef = open(errorfilename) lines=ef.readlines() print "cluster error output" print lines, "\n\n" except IOError: print "could not find cluster error file", errorfilename, "due to IOError" print "loaded", len(self.experiments), "experiments"
IOError
dataset/ETHPy150Open karlnapf/kameleon-mcmc/kameleon_mcmc/experiments/ExperimentAggregator.py/ExperimentAggregator.load_raw_results
5,775
def handle(self, filepath, cols): credentials = [] with open(filepath) as csv_file: reader = unicode_csv_reader(csv_file) try: next(reader) except __HOLE__: raise ValueError('empty csv file: %s' % filepath) for row in reader: credential = { 'name': row[cols['name']], 'login': row[cols.get('login', '')], 'password': row[cols['password']], 'comment': row[cols.get('comment', '')], } credentials.append(credential) return credentials
StopIteration
dataset/ETHPy150Open marcwebbie/passpie/passpie/importers/csv_importer.py/CSVImporter.handle
5,776
def get_raw_data(self, datum): """Returns the raw data for this column, before any filters or formatting are applied to it. This is useful when doing calculations on data in the table. """ # Callable transformations if callable(self.transform): data = self.transform(datum) # Dict lookups elif isinstance(datum, collections.Mapping) and \ self.transform in datum: data = datum.get(self.transform) else: # Basic object lookups try: data = getattr(datum, self.transform) except __HOLE__: msg = _("The attribute %(attr)s doesn't exist on " "%(obj)s.") % {'attr': self.transform, 'obj': datum} msg = termcolors.colorize(msg, **PALETTE['ERROR']) LOG.warning(msg) data = None return data
AttributeError
dataset/ETHPy150Open CiscoSystems/avos/horizon/tables/base.py/Column.get_raw_data
5,777
def get_summation(self): """Returns the summary value for the data in this column if a valid summation method is specified for it. Otherwise returns ``None``. """ if self.summation not in self.summation_methods: return None summation_function = self.summation_methods[self.summation] data = [self.get_raw_data(datum) for datum in self.table.data] data = filter(lambda datum: datum is not None, data) if len(data): try: summation = summation_function(data) for filter_func in self.filters: summation = filter_func(summation) return summation except __HOLE__: pass return None
TypeError
dataset/ETHPy150Open CiscoSystems/avos/horizon/tables/base.py/Column.get_summation
5,778
@staticmethod def parse_action(action_string): """Parses the ``action`` parameter (a string) sent back with the POST data. By default this parses a string formatted as ``{{ table_name }}__{{ action_name }}__{{ row_id }}`` and returns each of the pieces. The ``row_id`` is optional. """ if action_string: bits = action_string.split(STRING_SEPARATOR) bits.reverse() table = bits.pop() action = bits.pop() try: object_id = bits.pop() except __HOLE__: object_id = None return table, action, object_id
IndexError
dataset/ETHPy150Open CiscoSystems/avos/horizon/tables/base.py/DataTable.parse_action
5,779
@staticmethod def __add_body_from_string(args, body, proxy): proxy.append(JMX._bool_prop("HTTPSampler.postBodyRaw", True)) coll_prop = JMX._collection_prop("Arguments.arguments") header = JMX._element_prop("elementProp", "HTTPArgument") try: header.append(JMX._string_prop("Argument.value", body)) except __HOLE__: logging.warning("Failed to set body: %s", traceback.format_exc()) header.append(JMX._string_prop("Argument.value", "BINARY-STUB")) coll_prop.append(header) args.append(coll_prop) proxy.append(args)
ValueError
dataset/ETHPy150Open Blazemeter/taurus/bzt/jmx.py/JMX.__add_body_from_string
5,780
@staticmethod def __add_body_from_script(args, body, proxy): http_args_coll_prop = JMX._collection_prop("Arguments.arguments") for arg_name, arg_value in body.items(): try: http_element_prop = JMX._element_prop(arg_name, "HTTPArgument") except __HOLE__: logging.warning("Failed to get element property: %s", traceback.format_exc()) http_element_prop = JMX._element_prop('BINARY-STUB', "HTTPArgument") try: http_element_prop.append(JMX._string_prop("Argument.name", arg_name)) except ValueError: logging.warning("Failed to set arg name: %s", traceback.format_exc()) http_element_prop.append(JMX._string_prop("Argument.name", "BINARY-STUB")) try: http_element_prop.append( JMX._string_prop("Argument.value", arg_value if arg_value is not None else '')) except ValueError: logging.warning("Failed to set arg name: %s", traceback.format_exc()) http_element_prop.append(JMX._string_prop("Argument.value", "BINARY-STUB")) http_element_prop.append(JMX._bool_prop("HTTPArgument.always_encode", True)) http_element_prop.append(JMX._bool_prop("HTTPArgument.use_equals", arg_value is not None)) http_element_prop.append(JMX._string_prop("Argument.metadata", '=')) http_args_coll_prop.append(http_element_prop) args.append(http_args_coll_prop) proxy.append(args)
ValueError
dataset/ETHPy150Open Blazemeter/taurus/bzt/jmx.py/JMX.__add_body_from_script
5,781
@staticmethod def __add_hostnameport_2sampler(parsed_url, proxy, url): if parsed_url.scheme: proxy.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme)) if parsed_url.netloc: netloc_parts = parsed_url.netloc.split(':') if netloc_parts[0]: proxy.append(JMX._string_prop("HTTPSampler.domain", netloc_parts[0])) if len(netloc_parts) > 1 and netloc_parts[1]: proxy.append(JMX._string_prop("HTTPSampler.port", netloc_parts[1])) else: try: if parsed_url.port: proxy.append(JMX._string_prop("HTTPSampler.port", parsed_url.port)) else: proxy.append(JMX._string_prop("HTTPSampler.port", "")) except __HOLE__: logging.debug("Non-parsable port: %s", url) proxy.append(JMX._string_prop("HTTPSampler.port", ""))
ValueError
dataset/ETHPy150Open Blazemeter/taurus/bzt/jmx.py/JMX.__add_hostnameport_2sampler
5,782
def string_io(data=None): # cStringIO can't handle unicode ''' Pass data through to stringIO module and return result ''' try: return cStringIO(bytes(data)) except (UnicodeEncodeError, __HOLE__): return StringIO(data)
TypeError
dataset/ETHPy150Open saltstack/salt/salt/_compat.py/string_io
5,783
def test_avg_std(self): # Use integration to test distribution average and standard deviation. # Only works for distributions which do not consume variates in pairs g = random.Random() N = 5000 x = [i/float(N) for i in range(1,N)] for variate, args, mu, sigmasqrd in [ (g.uniform, (1.0,10.0), (10.0+1.0)/2, (10.0-1.0)**2/12), (g.triangular, (0.0, 1.0, 1.0/3.0), 4.0/9.0, 7.0/9.0/18.0), (g.expovariate, (1.5,), 1/1.5, 1/1.5**2), (g.vonmisesvariate, (1.23, 0), pi, pi**2/3), (g.paretovariate, (5.0,), 5.0/(5.0-1), 5.0/((5.0-1)**2*(5.0-2))), (g.weibullvariate, (1.0, 3.0), gamma(1+1/3.0), gamma(1+2/3.0)-gamma(1+1/3.0)**2) ]: g.random = x[:].pop y = [] for i in range(len(x)): try: y.append(variate(*args)) except __HOLE__: pass s1 = s2 = 0 for e in y: s1 += e s2 += (e - mu) ** 2 N = len(y) self.assertAlmostEqual(s1/N, mu, places=2, msg='%s%r' % (variate.__name__, args)) self.assertAlmostEqual(s2/(N-1), sigmasqrd, places=2, msg='%s%r' % (variate.__name__, args))
IndexError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_random.py/TestDistributions.test_avg_std
5,784
def run_query(self, query): connection = None try: connection = connect(**self.configuration.to_dict()) cursor = connection.cursor() cursor.execute(query) column_names = [] columns = [] for column in cursor.description: column_name = column[COLUMN_NAME] column_names.append(column_name) columns.append({ 'name': column_name, 'friendly_name': column_name, 'type': types_map.get(column[COLUMN_TYPE], None) }) rows = [dict(zip(column_names, row)) for row in cursor] data = {'columns': columns, 'rows': rows} json_data = json.dumps(data, cls=JSONEncoder) error = None cursor.close() except DatabaseError as e: logging.exception(e) json_data = None error = e.message except RPCError as e: logging.exception(e) json_data = None error = "Metastore Error [%s]" % e.message except __HOLE__: connection.cancel() error = "Query cancelled by user." json_data = None except Exception as e: logging.exception(e) raise sys.exc_info()[1], None, sys.exc_info()[2] finally: if connection: connection.close() return json_data, error
KeyboardInterrupt
dataset/ETHPy150Open getredash/redash/redash/query_runner/impala_ds.py/Impala.run_query
5,785
def get_filediff(self, request, *args, **kwargs): """Returns the FileDiff, or an error, for the given parameters.""" draft_resource = resources.review_request_draft try: draft = draft_resource.get_object(request, *args, **kwargs) except ObjectDoesNotExist: return DOES_NOT_EXIST if not draft_resource.has_access_permissions(request, draft): return self.get_no_access_error(request) try: return resources.draft_filediff.get_object(request, *args, **kwargs) except __HOLE__: return DOES_NOT_EXIST
ObjectDoesNotExist
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/draft_original_file.py/DraftOriginalFileResource.get_filediff
5,786
@app.route('/api/store/', methods=['POST']) def store(): """ Accepts a gzipped JSON POST body. If ``PUBLIC_WRITES`` is truthy, the Authorization header is ignored. Format resembles the following: >>> { >>> "event_type": "Exception", >>> "tags": [ ["level", "error"], ["server", "sentry.local"] ], >>> "date": "2010-06-18T22:31:45", >>> "time_spent": 0.0, >>> "event_id": "452dfa92380f438f98159bb75b9469e5", >>> "data": { >>> "culprit": "path.to.function", >>> "version": ["module", "version string"], >>> "modules": { >>> "module": "version string" >>> }, >>> "extra": { >>> "key": "value", >>> }, >>> "sentry.interfaces.Http": { >>> "url": "http://example.com/foo/bar", >>> "method": "POST", >>> "querystring": "baz=bar&foo=baz", >>> "data": { >>> "key": "value" >>> } >>> }, >>> "sentry.interfaces.Exception": { >>> "type": "ValueError", >>> "value": "An example exception" >>> }, >>> "sentry.interfaces.Stacktrace": { >>> "frames": [ >>> { >>> "filename": "/path/to/filename.py", >>> "module": "path.to.module", >>> "function": "function_name", >>> "vars": { >>> "key": "value" >>> } >>> } >>> ] >>> } >>> } >>> } """ has_header = request.environ.get('AUTHORIZATION', '').startswith('Sentry') if not (app.config['PUBLIC_WRITES'] or has_header): abort(401,'Unauthorized') data = request.data if has_header: auth_vars = parse_auth_header(request.META['AUTHORIZATION']) signature = auth_vars.get('signature') timestamp = auth_vars.get('timestamp') nonce = auth_vars.get('nonce') # TODO: check nonce # Signed data packet if signature and timestamp: try: timestamp = float(timestamp) except __HOLE__: abort(400, 'Invalid Timestamp') if timestamp < time.time() - 3600: # 1 hour abort(410, 'Message has expired') if signature != get_mac_signature(app.config['KEY'], data, timestamp, nonce): abort(403, 'Invalid signature') else: abort(401,'Unauthorized') logger = logging.getLogger('sentry.web.api.store') try: data = base64.b64decode(data).decode('zlib') except Exception, e: # This error should be caught as it suggests that there's a # bug somewhere in the client's code. logger.exception('Bad data received') abort(400, 'Bad data decoding request (%s, %s)' % (e.__class__.__name__, e)) try: data = simplejson.loads(data) except Exception, e: # This error should be caught as it suggests that there's a # bug somewhere in the client's code. logger.exception('Bad data received') abort(403, 'Bad data reconstructing object (%s, %s)' % (e.__class__.__name__, e)) # XXX: ensure keys are coerced to strings data = dict((str(k), v) for k, v in data.iteritems()) if 'date' in data: if is_float(data['date']): data['date'] = datetime.datetime.fromtimestamp(float(data['date'])) else: if '.' in data['date']: format = '%Y-%m-%dT%H:%M:%S.%f' else: format = '%Y-%m-%dT%H:%M:%S' data['date'] = datetime.datetime.strptime(data['date'], format) event, group = app.client.store(**data) return event.pk
ValueError
dataset/ETHPy150Open dcramer/sentry-old/sentry/collector/views.py/store
5,787
def get_command(self, ctx, name): try: if sys.version_info[0] == 2: name = name.encode('ascii', 'replace') mod = __import__('complex.commands.cmd_' + name, None, None, ['cli']) except __HOLE__: return return mod.cli
ImportError
dataset/ETHPy150Open pallets/click/examples/complex/complex/cli.py/ComplexCLI.get_command
5,788
def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None): ''' Compare different libcloud versions ''' if not HAS_LIBCLOUD: return False if not isinstance(reqver, (list, tuple)): raise RuntimeError( '\'reqver\' needs to passed as a tuple or list, i.e., (0, 14, 0)' ) try: import libcloud # pylint: disable=redefined-outer-name except __HOLE__: raise ImportError( 'salt-cloud requires >= libcloud {0} which is not installed'.format( '.'.join([str(num) for num in reqver]) ) ) if LIBCLOUD_VERSION_INFO >= reqver: return libcloud.__version__ errormsg = 'Your version of libcloud is {0}. '.format(libcloud.__version__) errormsg += 'salt-cloud requires >= libcloud {0}'.format( '.'.join([str(num) for num in reqver]) ) if why: errormsg += ' for {0}'.format(why) errormsg += '. Please upgrade.' raise ImportError(errormsg)
ImportError
dataset/ETHPy150Open saltstack/salt/salt/cloud/libcloudfuncs.py/check_libcloud_version
5,789
def create_directory(class_name, output): output_name = output if output_name[-1] != "/": output_name = output_name + "/" pathdir = output_name + class_name try: if not os.path.exists(pathdir): os.makedirs(pathdir) except __HOLE__: # FIXME pass
OSError
dataset/ETHPy150Open androguard/androguard/androdd.py/create_directory
5,790
def clean_upload(self): dirs_do_delete = [ appengine_rocket_engine, appengine_libs, virtualenv_appengine_libs ] for path in dirs_do_delete: try: shutil.rmtree(path) except __HOLE__: pass
OSError
dataset/ETHPy150Open xando/django-rocket-engine/rocket_engine/management/commands/appengine.py/Command.clean_upload
5,791
def update(self, argv): self.clean_upload() try: self.prepare_upload() try: get_callable(PRE_UPDATE_HOOK)() except (AttributeError, ImportError): pass appcfg.main(argv[1:] + [PROJECT_DIR]) try: get_callable(POST_UPDATE_HOOK)() except (AttributeError, __HOLE__): pass finally: self.clean_upload()
ImportError
dataset/ETHPy150Open xando/django-rocket-engine/rocket_engine/management/commands/appengine.py/Command.update
5,792
def _to_node(self, data): if data: try: state = self.NODE_STATE_MAP[data['status']] except KeyError: state = NodeState.UNKNOWN if 'server' not in data: # Response does not contain server UUID if the server # creation failed because of insufficient funds. return None public_ips = [] if 'nic:0:dhcp' in data: if isinstance(data['nic:0:dhcp'], list): public_ips = data['nic:0:dhcp'] else: public_ips = [data['nic:0:dhcp']] extra = {} extra_keys = [('cpu', 'int'), ('smp', 'auto'), ('mem', 'int'), ('status', 'str')] for key, value_type in extra_keys: if key in data: value = data[key] if value_type == 'int': value = int(value) elif value_type == 'auto': try: value = int(value) except __HOLE__: pass extra.update({key: value}) if 'vnc:ip' in data and 'vnc:password' in data: extra.update({'vnc_ip': data['vnc:ip'], 'vnc_password': data['vnc:password']}) node = Node(id=data['server'], name=data['name'], state=state, public_ips=public_ips, private_ips=None, driver=self.connection.driver, extra=extra) return node return None
ValueError
dataset/ETHPy150Open apache/libcloud/libcloud/compute/drivers/cloudsigma.py/CloudSigma_1_0_NodeDriver._to_node
5,793
def run_srb(self, *argv, **kwargs): if len(argv) == 1 and isinstance(argv[0], six.string_types): # convert a single string to a list argv = shlex.split(argv[0]) mock_stdout = six.StringIO() mock_stderr = six.StringIO() if 'exp_results' in kwargs: exp_results = kwargs['exp_results'] argv = argv[:-1] else: exp_results = None srb_args = ["", self.tempfile] + [str(s) for s in argv] try: with mock.patch("sys.stdout", mock_stdout): with mock.patch("sys.stderr", mock_stderr): ringbuilder.main(srb_args) except __HOLE__ as err: valid_exit_codes = None if exp_results is not None and 'valid_exit_codes' in exp_results: valid_exit_codes = exp_results['valid_exit_codes'] else: valid_exit_codes = (0, 1) # (success, warning) if err.code not in valid_exit_codes: msg = 'Unexpected exit status %s\n' % err.code msg += 'STDOUT:\n%s\nSTDERR:\n%s\n' % ( mock_stdout.getvalue(), mock_stderr.getvalue()) self.fail(msg) return (mock_stdout.getvalue(), mock_stderr.getvalue())
SystemExit
dataset/ETHPy150Open openstack/swift/test/unit/cli/test_ringbuilder.py/RunSwiftRingBuilderMixin.run_srb
5,794
def tearDown(self): try: shutil.rmtree(self.tmpdir, True) except __HOLE__: pass
OSError
dataset/ETHPy150Open openstack/swift/test/unit/cli/test_ringbuilder.py/TestCommands.tearDown
5,795
def create_sample_ring(self, part_power=6): """ Create a sample ring with four devices At least four devices are needed to test removing a device, since having less devices than replicas is not allowed. """ # Ensure there is no existing test builder file because # create_sample_ring() might be used more than once in a single test try: os.remove(self.tmpfile) except __HOLE__: pass ring = RingBuilder(part_power, 3, 1) ring.add_dev({'weight': 100.0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'device': 'sda1', 'meta': 'some meta data', }) ring.add_dev({'weight': 100.0, 'region': 1, 'zone': 1, 'ip': '127.0.0.2', 'port': 6201, 'device': 'sda2' }) ring.add_dev({'weight': 100.0, 'region': 2, 'zone': 2, 'ip': '127.0.0.3', 'port': 6202, 'device': 'sdc3' }) ring.add_dev({'weight': 100.0, 'region': 3, 'zone': 3, 'ip': '127.0.0.4', 'port': 6203, 'device': 'sdd4' }) ring.save(self.tmpfile)
OSError
dataset/ETHPy150Open openstack/swift/test/unit/cli/test_ringbuilder.py/TestCommands.create_sample_ring
5,796
def tearDown(self): try: shutil.rmtree(self.tmpdir, True) except __HOLE__: pass
OSError
dataset/ETHPy150Open openstack/swift/test/unit/cli/test_ringbuilder.py/TestRebalanceCommand.tearDown
5,797
def run_srb(self, *argv): mock_stdout = six.StringIO() mock_stderr = six.StringIO() srb_args = ["", self.tempfile] + [str(s) for s in argv] try: with mock.patch("sys.stdout", mock_stdout): with mock.patch("sys.stderr", mock_stderr): ringbuilder.main(srb_args) except __HOLE__ as err: if err.code not in (0, 1): # (success, warning) raise return (mock_stdout.getvalue(), mock_stderr.getvalue())
SystemExit
dataset/ETHPy150Open openstack/swift/test/unit/cli/test_ringbuilder.py/TestRebalanceCommand.run_srb
5,798
def _sort_gens(gens, **args): """Sort generators in a reasonably intelligent way. """ opt = build_options(args) gens_order, wrt = {}, None if opt is not None: gens_order, wrt = {}, opt.wrt for i, gen in enumerate(opt.sort): gens_order[gen] = i + 1 def order_key(gen): gen = str(gen) if wrt is not None: try: return (-len(wrt) + wrt.index(gen), gen, 0) except __HOLE__: pass name, index = _re_gen.match(gen).groups() if index: index = int(index) else: index = 0 try: return ( gens_order[name], name, index) except KeyError: pass try: return (_gens_order[name], name, index) except KeyError: pass return (_max_order, name, index) try: gens = sorted(gens, key=order_key) except TypeError: # pragma: no cover pass return tuple(gens)
ValueError
dataset/ETHPy150Open sympy/sympy/sympy/polys/polyutils.py/_sort_gens
5,799
def _parallel_dict_from_expr_if_gens(exprs, opt): """Transform expressions into a multinomial form given generators. """ k, indices = len(opt.gens), {} for i, g in enumerate(opt.gens): indices[g] = i polys = [] for expr in exprs: poly = {} if expr.is_Equality: expr = expr.lhs - expr.rhs for term in Add.make_args(expr): coeff, monom = [], [0]*k for factor in Mul.make_args(term): if not _not_a_coeff(factor) and factor.is_Number: coeff.append(factor) else: try: if opt.series is False: base, exp = decompose_power(factor) if exp < 0: exp, base = -exp, Pow(base, -S.One) else: base, exp = decompose_power_rat(factor) monom[indices[base]] = exp except __HOLE__: if not factor.free_symbols.intersection(opt.gens): coeff.append(factor) else: raise PolynomialError("%s contains an element of the generators set" % factor) monom = tuple(monom) if monom in poly: poly[monom] += Mul(*coeff) else: poly[monom] = Mul(*coeff) polys.append(poly) return polys, opt.gens
KeyError
dataset/ETHPy150Open sympy/sympy/sympy/polys/polyutils.py/_parallel_dict_from_expr_if_gens
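Each record above pairs a masked snippet with the exception class that was removed from its except clause. As a minimal sketch of how the two columns fit together (the variable names and record layout here are illustrative assumptions, not part of the dataset), the masked clause can be restored by substituting the labelled exception back into the __HOLE__ placeholder:

# Minimal sketch: restore a masked except clause from one record.
# masked_snippet and exception_label are hypothetical names; the values are
# taken from the _fetchone_impl record above (label: AttributeError).
masked_snippet = "try: return self.cursor.fetchone() except __HOLE__: self._non_result()"
exception_label = "AttributeError"

def fill_hole(snippet, label):
    """Replace the __HOLE__ placeholder with the labelled exception class."""
    return snippet.replace("__HOLE__", label)

print(fill_hole(masked_snippet, exception_label))
# -> try: return self.cursor.fetchone() except AttributeError: self._non_result()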