Dataset columns (name — type, value range, meaning):
  Unnamed: 0 — int64, values 0 to 10k: running row index
  function — string, lengths 79 to 138k: source code of one function, with the exception name masked as __HOLE__
  label — string, 20 classes: the exception type that fills the hole
  info — string, lengths 42 to 261: dataset path of the file the function was taken from
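Each row below pairs a numeric index, a function body whose target exception is masked as __HOLE__, the exception label, and the source path. As a rough, hypothetical illustration of how such a dump could be inspected (the CSV file name and the use of pandas are assumptions, not something the dataset specifies):

    # Hypothetical loader sketch -- assumes the rows were exported to a CSV
    # file named "exception_holes.csv" with the four columns listed above.
    import pandas as pd

    df = pd.read_csv("exception_holes.csv")

    # Each function body carries a masked exception site written as __HOLE__;
    # the label column names the exception class that belongs there.
    print(df["function"].str.count("__HOLE__").describe())
    print(df["label"].value_counts())   # 20 distinct exception types
    print(df.loc[0, "info"])            # path of the originating source file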
7,400
def filter(self, func, dropna=True, *args, **kwargs):  # noqa
    """
    Return a copy of a DataFrame excluding elements from groups that
    do not satisfy the boolean criterion specified by func.

    Parameters
    ----------
    f : function
        Function to apply to each subframe. Should return True or False.
    dropna : Drop groups that do not pass the filter. True by default;
        if False, groups that evaluate False are filled with NaNs.

    Notes
    -----
    Each subframe is endowed the attribute 'name' in case you need to know
    which group you are working on.

    Examples
    --------
    >>> grouped = df.groupby(lambda x: mapping[x])
    >>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
    """
    indices = []

    obj = self._selected_obj
    gen = self.grouper.get_iterator(obj, axis=self.axis)

    for name, group in gen:
        object.__setattr__(group, 'name', name)

        res = func(group, *args, **kwargs)

        try:
            res = res.squeeze()
        except __HOLE__:  # allow e.g., scalars and frames to pass
            pass

        # interpret the result of the filter
        if is_bool(res) or (lib.isscalar(res) and isnull(res)):
            if res and notnull(res):
                indices.append(self._get_index(name))
        else:
            # non scalars aren't allowed
            raise TypeError("filter function returned a %s, "
                            "but expected a scalar bool" %
                            type(res).__name__)

    return self._apply_filter(indices, dropna)
AttributeError
dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/NDFrameGroupBy.filter
7,401
def _aggregate_item_by_item(self, func, *args, **kwargs):
    obj = self._obj_with_exclusions
    result = {}

    if self.axis > 0:
        for item in obj:
            try:
                itemg = DataFrameGroupBy(obj[item],
                                         axis=self.axis - 1,
                                         grouper=self.grouper)
                result[item] = itemg.aggregate(func, *args, **kwargs)
            except (__HOLE__, TypeError):
                raise
        new_axes = list(obj.axes)
        new_axes[self.axis] = self.grouper.result_index
        return Panel._from_axes(result, new_axes)
    else:
        raise ValueError("axis value must be greater than 0")
ValueError
dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/PanelGroupBy._aggregate_item_by_item
7,402
def post(self, route, data, headers=HEADERS):
    result = self.fetch("/%s" % route,
                        method = "POST",
                        body = json.dumps(data),
                        headers=headers).body
    try:
        return json.loads(result)
    except __HOLE__:
        raise ValueError(result)
ValueError
dataset/ETHPy150Open sihrc/tornado-boilerplate/indico/tests/testutils/server.py/ServerTest.post
7,403
def vertebral_detection(fname, fname_seg, init_disc, verbose, laplacian=0): shift_AP = 32 # shift the centerline towards the spine (in voxel). size_AP = 11 # window size in AP direction (=y) (in voxel) size_RL = 1 # window size in RL direction (=x) (in voxel) size_IS = 19 # window size in IS direction (=z) (in voxel) smooth_factor = [9, 3, 1] # searching_window_for_maximum = 5 # size used for finding local maxima thr_corr = 0.2 # disc correlation threshold. Below this value, use template distance. # gaussian_std_factor = 5 # the larger, the more weighting towards central value. This value is arbitrary-- should adjust based on large dataset fig_anat_straight = 1 # handle for figure fig_pattern = 2 # handle for figure # fig_corr = 3 # handle for figure # initialization contrast_template = 't2' # capitalize letters for contrast if contrast_template == 't1': contrast_template = 'T1' elif contrast_template == 't2': contrast_template = 'T2' # get path of SCT from os import path path_script = path.dirname(__file__) path_sct = slash_at_the_end(path.dirname(path_script), 1) folder_template = 'data/template/' # retrieve file_template based on contrast fname_template_list = glob(path_sct+folder_template+'*'+contrast_template+'.nii.gz') # TODO: make sure there is only one file -- check if file is there otherwise it crashes fname_template = fname_template_list[0] # retrieve disc level from template fname_disc_list = glob(path_sct+folder_template+'*_disc.nii.gz') fname_disc = fname_disc_list[0] # Open template and disc labels data_template = Image(fname_template).data data_disc_template = Image(fname_disc).data # apply Laplacian filtering to template data if laplacian: printv('\nApplying Laplacian filter to template data...', verbose) from sct_maths import laplacian data_template = laplacian(data_template.astype(float), [1, 1, 1]) # define mean distance (in voxel) between adjacent discs: [C1/C2 -> C2/C3], [C2/C3 -> C4/C5], ..., [L1/L2 -> L2/L3] list_disc_value_template = sorted(data_disc_template[data_disc_template.nonzero()]) list_disc_z_template = [int(np.where(data_disc_template == list_disc_value_template[i])[2]) for i in range(len(list_disc_value_template))] printv('\nDisc values from template: '+str(list_disc_value_template), verbose) list_distance_template = (np.diff(list_disc_z_template) * (-1)).tolist() # multiplies by -1 to get positive distances printv('Distances between discs (in voxel): '+str(list_distance_template), verbose) # create pattern for each disc # data_template = Image(fname_template) if verbose == 2: import matplotlib.pyplot as plt plt.ion() # enables interactive mode # open anatomical volume img = Image(fname) data = img.data # smooth data from scipy.ndimage.filters import gaussian_filter data = gaussian_filter(data, smooth_factor, output=None, mode="reflect") # get dimension nx, ny, nz, nt, px, py, pz, pt = img.dim #================================================== # Compute intensity profile across vertebrae #================================================== # convert mm to voxel index # shift_AP = int(round(shift_AP / py)) # size_AP = int(round(size_AP / py)) # size_RL = int(round(size_RL / px)) # size_IS = int(round(size_IS / pz)) # define z: vector of indices along spine z = range(nz) # define xc and yc (centered in the field of view) xc = int(round(nx/2)) # direction RL yc = int(round(ny/2)) # direction AP # display stuff if verbose == 2: plt.matshow(np.mean(data[xc-size_RL:xc+size_RL, :, :], axis=0).transpose(), fignum=fig_anat_straight, cmap=plt.cm.gray, 
origin='lower') plt.title('Anatomical image') plt.autoscale(enable=False) # to prevent autoscale of axis when displaying plot # plt.text(yc+shift_AP+4, init_disc[0], 'init', verticalalignment='center', horizontalalignment='left', color='yellow', fontsize=15), plt.draw() # FIND DISCS # =========================================================================== printv('\nDetect intervertebral discs...', verbose) # assign initial z and disc current_z = init_disc[0] current_disc = init_disc[1] # mean_distance = mean_distance * pz # mean_distance_real = np.zeros(len(mean_distance)) # create list for z and disc list_disc_z = [] list_disc_value = [] # # do local adjustment to be at the center of the disc # printv('.. local adjustment to center disc', verbose) # pattern = data[xc-size_RL:xc+size_RL+1, yc+shift_AP-size_AP:yc+shift_AP+size_AP+1, current_z-size_IS:current_z+size_IS+1] # current_z = local_adjustment(xc, yc, current_z, current_disc, data, size_RL, shift_AP, size_IS, searching_window_for_maximum, verbose) # if verbose == 2: # plt.figure(fig_anat_straight), plt.scatter(yc+shift_AP, current_z, c='g', s=50) # plt.text(yc+shift_AP+4, current_z, str(current_disc)+'/'+str(current_disc+1), verticalalignment='center', horizontalalignment='left', color='green', fontsize=15) # # plt.draw() # append value to main list # list_disc_z = np.append(list_disc_z, current_z).astype(int) # list_disc_value = np.append(list_disc_value, current_disc).astype(int) # # update initial value (used when switching disc search to inferior direction) # init_disc[0] = current_z # find_disc(data, current_z, current_disc, approx_distance_to_next_disc, direction) # loop until potential new peak is inside of FOV direction = 'superior' search_next_disc = True while search_next_disc: printv('Current disc: '+str(current_disc)+' (z='+str(current_z)+'). Direction: '+direction, verbose) try: # get z corresponding to current disc on template current_z_template = int(np.where(data_disc_template == current_disc)[2]) except TypeError: # in case reached the bottom (see issue #849) printv('WARNING: Reached the bottom of the template. Stop searching.', verbose, 'warning') break # Get pattern from template corresponding to current_disc pattern = data_template[xc-size_RL:xc+size_RL+1, yc+shift_AP-size_AP:yc+shift_AP+size_AP+1, current_z_template-size_IS:current_z_template+size_IS+1] pattern1d = pattern.ravel() if verbose == 2: # display init disc plt.figure(fig_anat_straight) plt.scatter(yc+shift_AP, current_z, c='red', s=50) # # display template pattern # plt.figure(fig_pattern) # plt.matshow(np.flipud(np.mean(pattern[:, :, :], axis=0).transpose()), fignum=fig_pattern, cmap=plt.cm.gray) # plt.title('Pattern in sagittal averaged across R-L') # plt.show() # compute correlation between pattern and data # printv('.. 
approximate distance to next disc: '+str(approx_distance_to_next_disc)+' mm', verbose) range_z = range(-10, 10) # length_y_corr = range(-5, 5) # I_corr = np.zeros((length_z_corr)) I_corr = np.zeros(len(range_z)) # ind_y = 0 allzeros = 0 # for iy in length_y_corr: # loop across range of z defined by template distance ind_I = 0 for iz in range_z: # if pattern extends towards the top part of the image, then crop and pad with zeros if current_z+iz+size_IS > nz: padding_size = current_z+iz+size_IS data_chunk3d = data[xc-size_RL:xc+size_RL+1, yc+shift_AP-size_AP:yc+shift_AP+size_AP+1, current_z+iz-size_IS:current_z+iz+size_IS+1-padding_size] data_chunk3d = np.pad(data_chunk3d, ((0, 0), (0, 0), (0, padding_size)), 'constant', constant_values=0) # if pattern extends towards bottom part of the image, then crop and pad with zeros elif current_z-iz-size_IS < 0: padding_size = abs(current_z-iz-size_IS) data_chunk3d = data[xc-size_RL:xc+size_RL+1, yc+shift_AP-size_AP:yc+shift_AP+size_AP+1, current_z-iz-size_IS+padding_size:current_z-iz+size_IS+1] data_chunk3d = np.pad(data_chunk3d, ((0, 0), (0, 0), (padding_size, 0)), 'constant', constant_values=0) else: data_chunk3d = data[xc-size_RL:xc+size_RL+1, yc+shift_AP-size_AP:yc+shift_AP+size_AP+1, current_z+iz-size_IS:current_z+iz+size_IS+1] # if verbose == 2 and iz == 0: # # display template and subject patterns # plt.figure(fig_pattern) # plt.subplot(131) # plt.imshow(np.flipud(np.mean(pattern[:, :, :], axis=0).transpose()), origin='upper', cmap=plt.cm.gray, interpolation='none') # plt.title('Template pattern') # plt.subplot(132) # plt.imshow(np.flipud(np.mean(data_chunk3d[:, :, :], axis=0).transpose()), origin='upper', cmap=plt.cm.gray, interpolation='none') # plt.title('Subject pattern at iz=0') # # save figure # plt.figure(fig_pattern), plt.savefig('../fig_pattern_disc'+str(current_disc)+'.png'), plt.close() # convert subject pattern to 1d data_chunk1d = data_chunk3d.ravel() # check if data_chunk1d contains at least one non-zero value # if np.any(data_chunk1d): --> old code which created issue #794 (jcohenadad 2016-04-05) if (data_chunk1d.size == pattern1d.size) and np.any(data_chunk1d): #I_corr[ind_I] = np.corrcoef(data_chunk1d, pattern1d)[0, 1] # data_chunk2d = np.mean(data_chunk3d, 1) # pattern2d = np.mean(pattern, 1) I_corr[ind_I] = calc_MI(data_chunk1d, pattern1d, 32) # from sklearn import metrics # I_corr[ind_I] = metrics.adjusted_mutual_info_score(data_chunk1d, pattern1d) else: allzeros = 1 # printv('.. WARNING: iz='+str(iz)+': Data only contains zero. Set correlation to 0.', verbose) ind_I = ind_I + 1 # ind_y = ind_y + 1 if allzeros: printv('.. WARNING: Data contained zero. We probably hit the edge of the image.', verbose) # adjust correlation with Gaussian function centered at 'approx_distance_to_next_disc' # gaussian_window = gaussian(len(range_z), std=len(range_z)/gaussian_std_factor) # I_corr_adj = np.multiply(I_corr.transpose(), gaussian_window).transpose() I_corr_adj = I_corr # # display correlation curves # if verbose == 2: # plt.figure(fig_corr) # plt.plot(I_corr_adj) # plt.title('Correlation of pattern with data.') # Find peak within local neighborhood defined by mean distance template # ind_peak = argrelextrema(I_corr_adj, np.greater, order=searching_window_for_maximum)[0] # ind_peak = np.zeros(2).astype(int) # if len(ind_peak) == 0: # printv('.. WARNING: No peak found. 
Using adjusted template distance.', verbose) # ind_peak[0] = approx_distance_to_next_disc # based on distance template # ind_peak[1] = 0 # no shift along y # else: # Find global maximum # ind_peak = ind_peak[np.argmax(I_corr_adj[ind_peak])] if np.any(I_corr_adj): # if I_corr_adj contains at least a non-zero value ind_peak = [i for i in range(len(I_corr_adj)) if I_corr_adj[i] == max(I_corr_adj)][0] # index of max along z # ind_peak[1] = np.where(I_corr_adj == I_corr_adj.max())[1] # index of max along y printv('.. Peak found: z='+str(ind_peak)+' (correlation = '+str(I_corr_adj[ind_peak])+')', verbose) # check if correlation is high enough if I_corr_adj[ind_peak] < thr_corr: printv('.. WARNING: Correlation is too low. Using adjusted template distance.', verbose) ind_peak = range_z.index(0) # approx_distance_to_next_disc # ind_peak[1] = int(round(len(length_y_corr)/2)) else: # if I_corr_adj contains only zeros printv('.. WARNING: Correlation vector only contains zeros. Using adjusted template distance.', verbose) ind_peak = range_z.index(0) # approx_distance_to_next_disc # # display peak # if verbose == 2: # plt.figure(fig_corr), plt.plot(ind_peak, I_corr_adj[ind_peak], 'ro'), plt.draw() # # save figure # plt.figure(fig_corr), plt.savefig('../fig_correlation_disc'+str(current_disc)+'.png'), plt.close() # display patterns and correlation if verbose == 2: # display template pattern plt.figure(fig_pattern, figsize=(20, 7)) plt.subplot(141) plt.imshow(np.flipud(np.mean(pattern[:, :, :], axis=0).transpose()), origin='upper', cmap=plt.cm.gray, interpolation='none') plt.title('Template pattern') # display subject pattern centered at current_z plt.subplot(142) iz = 0 data_chunk3d = data[xc-size_RL:xc+size_RL+1, yc+shift_AP-size_AP:yc+shift_AP+size_AP+1, current_z+iz-size_IS:current_z+iz+size_IS+1] plt.imshow(np.flipud(np.mean(data_chunk3d[:, :, :], axis=0).transpose()), origin='upper', cmap=plt.cm.gray, interpolation='none') plt.title('Subject at iz=0') # display subject pattern centered at current_z plt.subplot(143) iz = range_z[ind_peak] data_chunk3d = data[xc-size_RL:xc+size_RL+1, yc+shift_AP-size_AP:yc+shift_AP+size_AP+1, current_z+iz-size_IS:current_z+iz+size_IS+1] plt.imshow(np.flipud(np.mean(data_chunk3d[:, :, :], axis=0).transpose()), origin='upper', cmap=plt.cm.gray, interpolation='none') plt.title('Subject at iz='+str(iz)) # display correlation curve plt.subplot(144) plt.plot(I_corr_adj) plt.title('MI between template and subject pattern') plt.plot(ind_peak, I_corr_adj[ind_peak], 'ro'), plt.draw() plt.axvline(x=range_z.index(0), linewidth=1, color='black', linestyle='dashed') plt.axhline(y=thr_corr, linewidth=1, color='r', linestyle='dashed') # save figure plt.figure(fig_pattern), plt.savefig('../fig_pattern_MI'+str(current_disc)+'.png'), plt.close() # assign new z_start and disc value current_z = current_z + range_z[ind_peak] # display new disc if verbose == 2: plt.figure(fig_anat_straight), plt.scatter(yc+shift_AP, current_z, c='yellow', s=50) plt.text(yc+shift_AP+4, current_z, str(current_disc)+'/'+str(current_disc+1), verticalalignment='center', horizontalalignment='left', color='yellow', fontsize=15), plt.draw() # append to main list if direction == 'superior': # append at the beginning list_disc_z.insert(0, current_z) list_disc_value.insert(0, current_disc) elif direction == 'inferior': # append at the end list_disc_z.append(current_z) list_disc_value.append(current_disc) # adjust correcting factor based on already-identified discs if len(list_disc_z) > 1: # compute distance between 
already-identified discs list_distance_current = (np.diff(list_disc_z) * (-1)).tolist() # retrieve the template distance corresponding to the already-identified discs index_disc_identified = [i for i, j in enumerate(list_disc_value_template) if j in list_disc_value[:-1]] list_distance_template_identified = [list_distance_template[i] for i in index_disc_identified] # divide subject and template distances for the identified discs list_subject_to_template_distance = [float(list_distance_current[i]) / list_distance_template_identified[i] for i in range(len(list_distance_current))] # average across identified discs to obtain an average correcting factor correcting_factor = np.mean(list_subject_to_template_distance) printv('.. correcting factor: '+str(correcting_factor), verbose) else: correcting_factor = 1 # update list_distance specific for the subject list_distance = [int(round(list_distance_template[i] * correcting_factor)) for i in range(len(list_distance_template))] # updated average_disc_distance (in case it is needed) # average_disc_distance = int(round(np.mean(list_distance))) # assign new current_z and disc value if direction == 'superior': try: approx_distance_to_next_disc = list_distance[list_disc_value_template.index(current_disc-1)] except ValueError: printv('WARNING: Disc value not included in template. Using previously-calculated distance: '+str(approx_distance_to_next_disc)) # try: # approx_distance_to_next_disc = list_distance[list_disc_value_template.index(current_disc)] # except ValueError: # printv('WARNING: Disc value not included in template. Using previous disc distance: '+str(approx_distance_to_next_disc)) # approx_distance_to_next_disc = average_disc_distance # assign new current_z and disc value current_z = current_z + approx_distance_to_next_disc current_disc = current_disc - 1 elif direction == 'inferior': try: approx_distance_to_next_disc = list_distance[list_disc_value_template.index(current_disc)] except: printv('WARNING: Disc value not included in template. Using previously-calculated distance: '+str(approx_distance_to_next_disc)) # approx_distance_to_next_disc = average_disc_distance # assign new current_z and disc value current_z = current_z - approx_distance_to_next_disc current_disc = current_disc + 1 # if current_z is larger than searching zone, switch direction (and start from initial z minus approximate distance from updated template distance) if current_z >= nz or current_disc == 1: printv('.. Switching to inferior direction.', verbose) direction = 'inferior' current_disc = init_disc[1] + 1 current_z = init_disc[0] - list_distance[list_disc_value_template.index(current_disc)] # if current_z is lower than searching zone, stop searching if current_z <= 0: search_next_disc = False # if verbose == 2: # # close figures # plt.figure(fig_corr), plt.close() # plt.figure(fig_pattern), plt.close() # if upper disc is not 1, add disc above top disc based on mean_distance_adjusted upper_disc = min(list_disc_value) if not upper_disc == 1: printv('Adding top disc based on adjusted template distance: #'+str(upper_disc-1), verbose) approx_distance_to_next_disc = list_distance[list_disc_value_template.index(upper_disc-1)] next_z = max(list_disc_z) + approx_distance_to_next_disc printv('.. 
approximate distance: '+str(approx_distance_to_next_disc), verbose) # make sure next disc does not go beyond FOV in superior direction if next_z > nz: list_disc_z.insert(0, nz) else: list_disc_z.insert(0, next_z) # assign disc value list_disc_value.insert(0, upper_disc-1) # LABEL SEGMENTATION # open segmentation seg = Image(fname_seg) # loop across z for iz in range(nz): # get index of the disc right above iz try: ind_above_iz = max([i for i in range(len(list_disc_z)) if list_disc_z[i] > iz]) except __HOLE__: # if ind_above_iz is empty, attribute value 0 vertebral_level = 0 else: # assign vertebral level (add one because iz is BELOW the disk) vertebral_level = list_disc_value[ind_above_iz] + 1 # print vertebral_level # get voxels in mask ind_nonzero = np.nonzero(seg.data[:, :, iz]) seg.data[ind_nonzero[0], ind_nonzero[1], iz] = vertebral_level if verbose == 2: plt.figure(fig_anat_straight) plt.scatter(int(round(ny/2)), iz, c=vertebral_level, vmin=min(list_disc_value), vmax=max(list_disc_value), cmap='prism', marker='_', s=200) # write file seg.file_name += '_labeled' seg.save() # save figure if verbose == 2: plt.figure(fig_anat_straight), plt.savefig('../fig_anat_straight_with_labels.png') plt.close() # Create label # ==========================================================================================
ValueError
dataset/ETHPy150Open neuropoly/spinalcordtoolbox/scripts/sct_label_vertebrae.py/vertebral_detection
7,404
def setUp(self):
    try:
        djangorunner.DjangoRunner()
    except __HOLE__:
        raise unittest.SkipTest("Django is not installed")
    saved_stdout = sys.stdout
    self.stream = StringIO()
    sys.stdout = self.stream
    self.addCleanup(setattr, sys, 'stdout', saved_stdout)
ImportError
dataset/ETHPy150Open CleanCut/green/green/test/test_djangorunner.py/TestDjangoRunner.setUp
7,405
def handle(self, *args, **options):
    if len(args) != 1:
        raise CommandError('Incorrect arguments')
    try:
        with open(args[0]) as f:
            data = json.load(f)
    except __HOLE__ as exc:
        raise CommandError("Malformed JSON: %s" % exc.message)

    def slot_generator(mydata):
        for d in mydata['slots']:
            for h in d['hours']:
                yield (d['day'], h)

    for roomcode, roomdesc in data['rooms'].iteritems():
        r = Room(code=roomcode, name=roomdesc)
        r.save()

    for desc in data['allocations']:
        started = False
        t = Topic.objects.get(name=desc['topic'])
        room = Room.objects.get(code=desc['room'])
        for (d, h) in slot_generator(data):
            if (d == desc['start_day'] and h == desc['first_slot']):
                started = True
            if started:
                s = Slot(start_time="%s %s" % (d, h), room=room, topic=t)
                s.save()
            if (d == desc['end_day'] and h == desc['last_slot']):
                break
ValueError
dataset/ETHPy150Open openstack-infra/odsreg/scheduling/management/commands/loadslots.py/Command.handle
7,406
def __getattr__(self, key):
    try:
        return self.__kwargs[key]
    except __HOLE__:
        raise AttributeError(key)

# Have to provide __setstate__ to avoid
# infinite recursion since we override
# __getattr__.
KeyError
dataset/ETHPy150Open mongodb/mongo-python-driver/bson/dbref.py/DBRef.__getattr__
7,407
@staticmethod
def _make_db(updates):
    try:
        resource_provider = updates.pop('resource_provider')
        updates['resource_provider_id'] = resource_provider.id
    except (KeyError, NotImplementedError):
        raise exception.ObjectActionError(
            action='create',
            reason='resource_provider required')
    try:
        resource_class = updates.pop('resource_class')
    except __HOLE__:
        raise exception.ObjectActionError(
            action='create',
            reason='resource_class required')
    updates['resource_class_id'] = fields.ResourceClass.index(
        resource_class)
    return updates
KeyError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/objects/resource_provider.py/_HasAResourceProvider._make_db
7,408
def _get_dci_id_and_proj_name(self, proj_name):
    # dci_id can be embedded in the partition name, name:dci_id:27
    dciid_key = ':dci_id:'
    try:
        dci_index = proj_name.index(dciid_key)
    except __HOLE__:
        # There is no dci_id in the project name
        return proj_name, None

    proj_fields = proj_name[dci_index + 1:].split(':')
    if len(proj_fields) == 2:
        if (proj_fields[1].isdigit() and
                proj_fields[0] == dciid_key[1:-1]):
            LOG.debug('project name %(proj)s DCI_ID %(dci_id)s.', (
                {'proj': proj_name[0:dci_index],
                 'dci_id': proj_fields[1]}))
            return proj_name[0:dci_index], proj_fields[1]
ValueError
dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/apps/saf/server/dfa_server.py/DfaServer._get_dci_id_and_proj_name
7,409
def _get_segmentation_id(self, segid):
    """Allocate segmentation id."""
    try:
        newseg = (segid, self.segmentation_pool.remove(segid)
                  if segid and segid in self.segmentation_pool
                  else self.segmentation_pool.pop())
        return newseg[0] if newseg[0] else newseg[1]
    except __HOLE__:
        LOG.exception(_LE('Error: Segmentation id pool is empty'))
        return 0
KeyError
dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/apps/saf/server/dfa_server.py/DfaServer._get_segmentation_id
7,410
def network_create_event(self, network_info): """Process network create event. Save the network inforamtion in the database. """ net = network_info['network'] net_id = net['id'] self.network[net_id] = {} self.network[net_id].update(net) net_name = net.get('name') tenant_id = net.get('tenant_id') # Extract segmentation_id from the network name net_ext_name = self.cfg.dcnm.dcnm_net_ext nobj = re.search(net_ext_name, net_name) try: seg_id = int((net_name[nobj.start(0) + len(net_ext_name) - 1:] if nobj else None)) except (IndexError, __HOLE__, ValueError): seg_id = None # Check if network is already created. query_net = self.get_network_by_segid(seg_id) if seg_id else None if query_net: # The network is already created no need to process the event. if query_net.source.lower() == 'dcnm': # DCNM created the network. Only update network id in database. prev_id = query_net.network_id params = dict(columns=dict(network_id=net_id)) self.update_network(prev_id, **params) # Update the network cache. prev_info = self.network.pop(prev_id) prev_info['id'] = net_id self.network[net_id] = prev_info # Update the network name. After extracting the segmentation_id # no need to keep it in the name. Removing it and update # the network. updated_net_name = ( net_name[:nobj.start(0) + len(net_ext_name) - 1]) try: body = {'network': {'name': updated_net_name, }} dcnm_net = self.neutronclient.update_network( net_id, body=body).get('network') LOG.debug('Updated network %(network)s', dcnm_net) except Exception as exc: LOG.exception(_LE('Failed to update network ' '%(network)s. Reason %(err)s.'), {'network': updated_net_name, 'err': str(exc)}) return LOG.info(_LI('network_create_event: network %(name)s was created ' 'by %(source)s. Ignoring processing the event.'), {'name': net_name, 'source': 'dcnm'}) return # Check if project (i.e. tenant) exist. tenant_name = self.get_project_name(tenant_id) if not tenant_name: LOG.error(_LE('Failed to create network %(name)s. Project ' '%(tenant_id)s does not exist.'), {'name': net_name, 'tenant_id': tenant_id}) return pseg_id = self.network[net_id].get('provider:segmentation_id') seg_id = self._get_segmentation_id(pseg_id) self.network[net_id]['segmentation_id'] = seg_id try: cfgp, fwd_mod = self.dcnm_client.get_config_profile_for_network( net.get('name')) self.network[net_id]['config_profile'] = cfgp self.network[net_id]['fwd_mod'] = fwd_mod self.add_network_db(net_id, self.network[net_id], 'openstack', constants.RESULT_SUCCESS) LOG.debug('network_create_event: network=%s', self.network) except dexc.DfaClientRequestFailed: # Fail to get config profile from DCNM. # Save the network info with failure result and send the request # to DCNM later. self.add_network_db(net_id, self.network[net_id], 'openstack', constants.CREATE_FAIL) LOG.error(_LE('Failed to create network=%s.'), self.network)
TypeError
dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/apps/saf/server/dfa_server.py/DfaServer.network_create_event
7,411
def _get_ip_leases(self):
    if not self.cfg.dcnm.dcnm_dhcp_leases:
        LOG.debug('DHCP lease file is not defined.')
        return
    try:
        ssh_session = paramiko.SSHClient()
        ssh_session.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh_session.connect(self.cfg.dcnm.dcnm_ip,
                            username=self.cfg.dcnm.dcnm_user,
                            password=self.cfg.dcnm.dcnm_password)
    except Exception:
        LOG.exception(_LE('Failed to establish connection with DCNM.'))
        if ssh_session:
            ssh_session.close()
        return
    try:
        ftp_session = ssh_session.open_sftp()
        dhcpd_leases = ftp_session.file(self.cfg.dcnm.dcnm_dhcp_leases)
        leases = dhcpd_leases.readlines()
        ftp_session.close()
        ssh_session.close()
        return leases
    except __HOLE__:
        ftp_session.close()
        ssh_session.close()
        LOG.error(_LE('Cannot open %(file)s.'),
                  {'file': self.cfg.dcnm.dcnm_dhcp_leases})
IOError
dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/apps/saf/server/dfa_server.py/DfaServer._get_ip_leases
7,412
def save_my_pid(cfg):
    mypid = os.getpid()

    pid_path = cfg.dfa_log.pid_dir
    pid_file = cfg.dfa_log.pid_server_file
    if pid_path and pid_file:
        try:
            if not os.path.exists(pid_path):
                os.makedirs(pid_path)
        except __HOLE__:
            LOG.error(_LE('Fail to create %s'), pid_path)
            return

        pid_file_path = os.path.join(pid_path, pid_file)
        LOG.debug('dfa_server pid=%s', mypid)
        with open(pid_file_path, 'w') as funcp:
            funcp.write(str(mypid))
OSError
dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/apps/saf/server/dfa_server.py/save_my_pid
7,413
def dfa_server(): try: cfg = config.CiscoDFAConfig().cfg logging.setup_logger('dfa_enabler', cfg) dfa = DfaServer(cfg) save_my_pid(cfg) dfa.create_threads() while True: time.sleep(constants.MAIN_INTERVAL) if dfa.dcnm_dhcp: dfa.update_port_ip_address() else: dfa.check_dhcp_ports() for trd in dfa.dfa_threads: if not trd.am_i_active: LOG.info(_LI("Thread %s is not active."), trd.name) try: exc = trd._excq.get(block=False) except queue.Empty: pass else: trd_name = eval(exc).get('name') exc_tb = eval(exc).get('tb') LOG.error(_LE('Exception occurred in %(name)s thread. ' '%(tb)s'), {'name': trd_name, 'tb': exc_tb}) # Check on dfa agents cur_time = time.time() for agent, time_s in six.iteritems(dfa.agents_status_table): last_seen = time.mktime(time.strptime(time_s)) if abs(cur_time - last_seen - constants.MAIN_INTERVAL) > constants.HB_INTERVAL: LOG.error(_LE("Agent on %(host)s is not seen for %(sec)s. " "Last seen was %(time)s."), {'host': agent, 'sec': abs(cur_time - last_seen), 'time': time_s}) except Exception as exc: LOG.exception(_LE("ERROR: %s"), exc) except __HOLE__: pass
KeyboardInterrupt
dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/apps/saf/server/dfa_server.py/dfa_server
7,414
def test_iterator_pickling(self):
    testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
                 (13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)]
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for t in testcases:
            it = itorg = iter(range(*t))
            data = list(range(*t))

            d = pickle.dumps(it)
            it = pickle.loads(d)
            self.assertEqual(type(itorg), type(it))
            self.assertEqual(list(it), data)

            it = pickle.loads(d)
            try:
                next(it)
            except __HOLE__:
                continue
            d = pickle.dumps(it)
            it = pickle.loads(d)
            self.assertEqual(list(it), data[1:])
StopIteration
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_range.py/RangeTest.test_iterator_pickling
7,415
def test_lock(self):
    with FileLock(self.tmpfile, "r+w"):
        try:
            fcntl.flock(self.tmpfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            assert False, "Flock should have failed"
        except __HOLE__:
            pass
IOError
dataset/ETHPy150Open khamidou/kite/src/back/tests/test_lockfile.py/TestLockfile.test_lock
7,416
def __getattr__(self, attr):
    from .browser_integration import warning

    try:
        return getattr(self.webdriver, attr)
    except __HOLE__ as e:
        raise e
    except Exception as e:
        warning(str(e))
AttributeError
dataset/ETHPy150Open apiad/sublime-browser-integration/browser.py/Browser.__getattr__
7,417
def parse_profile_html(html): '''Parses a user profile page into the data fields and essays. It turns out parsing HTML, with all its escaped characters, and handling wacky unicode characters, and writing them all out to a CSV file (later), is a pain in the ass because everything that is not ASCII goes out of its way to make your life hard. During this function, we handle unescaping of html special characters. Later, before writing to csv, we force everything to ASCII, ignoring characters that don't play well. ''' html = html.lower() soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES) NA = 'NA' basics_soup = soup.find(name = 'div', attrs = {'id': 'basic_info'}) details_soup = soup.find(name='div', attrs={'id':'profile_details'}) essays_soup = soup.find(name='div', attrs={'id':'main_column'}) if not (basics_soup and details_soup and essays_soup): print 'Profile likely deleted. Missing expected html structure.' return None, None profile = {} # Extract top-line profile data (age, gender, city) from tags like # <span id='ajax_gender'>Female</span> for tag in basics_soup.findAll(name = 'span')[1:]: # Skip 'Last Online' feature, value = tag['id'].split('_', 1)[1], tag.text.strip() print feature, value profile[feature] = value # Extract personal data items from tags like # <dd id='ajax_bodytype'>Female</span> for tag in details_soup.findAll(name = 'dd'): try: feature, value = tag['id'].split('_', 1)[1], tag.text.strip() if feature == 'height': # Special case height to parse into inches print value feet, inches = [int(x[:-1]) for x in value.split()[:2]] value = str(int(feet) * 12 + int(inches)) print feature, value profile[feature] = value except __HOLE__: continue # Essays essays = {} for e in essays_soup.findAll('div', recursive=False): if e['id'].startswith('essay_'): essay_id = int(e['id'].split('essay_')[1]) title = e.a.text user_response = ''.join( str(x) for x in e.div.div.contents).replace('<br />', '').strip() essays[essay_id] = (title, user_response) elif e['id'] == 'what_i_want': # These are high-level details about what the user wants in a date # TODO: parse and incorporate these as profile features pass return profile, essays
KeyError
dataset/ETHPy150Open everett-wetchler/okcupid/FetchProfiles.py/parse_profile_html
7,418
def read_usernames(filename):
    '''Extracts usernames from the given file, returning a sorted list.

    The file should either be:
    1) A list of usernames, one per line
    2) A CSV file with a 'username' column (specified in its header line)
    '''
    try:
        rows = [r for r in csv.reader(open(filename))]
        try:
            idx = rows[0].index('username')
            unames = [row[idx].lower() for row in rows[1:]]
        except __HOLE__:
            unames = [r[0] for r in rows]
        return sorted(set(unames))
    except IOError, e:  # File doesn't exist
        return []
ValueError
dataset/ETHPy150Open everett-wetchler/okcupid/FetchProfiles.py/read_usernames
7,419
def check_exists(fips_dir) :
    try :
        subprocess.check_output(['ccache', '--version'])
        return True
    except (__HOLE__, subprocess.CalledProcessError):
        return False
OSError
dataset/ETHPy150Open floooh/fips/mod/tools/ccache.py/check_exists
7,420
def main(argv):  # pragma: no cover
    ip = "127.0.0.1"
    port = 5683

    try:
        opts, args = getopt.getopt(argv, "hi:p:", ["ip=", "port="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-i", "--ip"):
            ip = arg
        elif opt in ("-p", "--port"):
            port = int(arg)

    server = CoAPServerPlugTest(ip, port)
    try:
        server.listen(10)
    except __HOLE__:
        print "Server Shutdown"
        server.close()
        print "Exiting..."
KeyboardInterrupt
dataset/ETHPy150Open Tanganelli/CoAPthon/plugtest_coapserver.py/main
7,421
def _get_version_output(self):
    """
    Ignoring errors, call `ceph --version` and return only the
    version portion of the output. For example, output like::

        ceph version 9.0.1-1234kjd (asdflkj2k3jh234jhg)

    Would return::

        9.0.1-1234kjd
    """
    if not self.executable:
        return ''
    command = [self.executable, '--version']
    out, _, _ = self._check(self.conn, command)
    try:
        return out.split()[2]
    except __HOLE__:
        return ''
IndexError
dataset/ETHPy150Open ceph/ceph-deploy/ceph_deploy/util/packages.py/Ceph._get_version_output
7,422
def _patched_convertTmplPathToModuleName(tmplPath):
    try:
        return splitdrive(tmplPath)[1].translate(_unitrans)
    except (UnicodeError, __HOLE__):
        return unicode(splitdrive(tmplPath)[1]).translate(_unitrans)  # pylint: disable=E0602
TypeError
dataset/ETHPy150Open ohmu/poni/poni/template.py/_patched_convertTmplPathToModuleName
7,423
def render_genshi(source_text, source_path, variables):
    assert genshi, "Genshi is not installed"
    if source_path:
        source = open(source_path)
    else:
        source = StringIO(source_text)

    try:
        tmpl = genshi.template.MarkupTemplate(source, filepath=source_path)
        stream = tmpl.generate(**variables)
        return stream.render('xml')
    except (genshi.template.TemplateError, __HOLE__) as error:
        raise errors.TemplateError("{0}: {1}: {2}".format(
            source_path, error.__class__.__name__, error))
IOError
dataset/ETHPy150Open ohmu/poni/poni/template.py/render_genshi
7,424
def main(): (options, args_, parser_) = simple_options(_parser, __version__, __dependencies__) try: with open(options.input) as xml_file: xml_string = xml_file.read() # At the moment there doesn't seem to be an obvious way to extract the xmlns from the asset. # For now, we'll attempt to just remove it before transforming it into a Python object. # <COLLADA xmlns="http://www.collada.org/2005/11/COLLADASchema" version="1.4.1"> # ==> # <COLLADA version="1.4.1"> if options.namespace is False: xml_string = sub(' xmlns="[^"]*"', '', xml_string) json_string = xml2json(xml_string, indent=options.json_indent, convert_types=options.convert_types) if options.output: with open(options.output, 'w') as target: target.write(json_string) target.write('\n') else: print json_string except __HOLE__ as e: LOG.error(e) return e.errno except Exception as e: LOG.critical('Unexpected exception: %s', e) return 1
IOError
dataset/ETHPy150Open turbulenz/turbulenz_tools/turbulenz_tools/tools/xml2json.py/main
7,425
def _faa_di_bruno_partitions(n): """ Return all non-negative integer solutions of the diophantine equation:: n*k_n + ... + 2*k_2 + 1*k_1 = n (1) Parameters ---------- n: int the r.h.s. of Eq. (1) Returns ------- partitions: a list of solutions of (1). Each solution is itself a list of the form `[(m, k_m), ...]` for non-zero `k_m`. Notice that the index `m` is 1-based. Examples: --------- >>> _faa_di_bruno_partitions(2) [[(1, 2)], [(2, 1)]] >>> for p in faa_di_bruno_partitions(4): ... assert 4 == sum(m * k for (m, k) in p) """ if n < 1: raise ValueError("Expected a positive integer; got %s instead" % n) try: return _faa_di_bruno_cache[n] except __HOLE__: # TODO: higher order terms # solve Eq. (31) from Blinninkov & Moessner here raise NotImplementedError('Higher order terms not yet implemented.')
KeyError
dataset/ETHPy150Open statsmodels/statsmodels/statsmodels/distributions/edgeworth.py/_faa_di_bruno_partitions
7,426
def _split_auth_string(auth_string):
    """ split a digest auth string into individual key=value strings """
    prev = None
    for item in auth_string.split(","):
        try:
            if prev.count('"') == 1:
                prev = "%s,%s" % (prev, item)
                continue
        except __HOLE__:
            if prev == None:
                prev = item
                continue
            else:
                raise StopIteration

        yield prev.strip()
        prev = item

    yield prev.strip()
    raise StopIteration
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Paste-2.0.1/paste/auth/digest.py/_split_auth_string
7,427
def add_pending(self, f):
    # Ignore symlinks
    if os.path.islink(f):
        return

    # Get the file mode to check and see if it's a block/char device
    try:
        file_mode = os.stat(f).st_mode
    except __HOLE__ as e:
        return

    # Only add this to the pending list of files to scan
    # if the file is a regular file or a block/character device.
    if (stat.S_ISREG(file_mode) or
            stat.S_ISBLK(file_mode) or
            stat.S_ISCHR(file_mode)):
        self.pending.append(f)
OSError
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/extractor.py/Extractor.add_pending
7,428
def callback(self, r): # Make sure the file attribute is set to a compatible instance of binwalk.core.common.BlockFile try: r.file.size except __HOLE__ as e: pass except Exception as e: return if not r.size: size = r.file.size - r.offset else: size = r.size # Only extract valid results that have been marked for extraction and displayed to the user. # Note that r.display is still True even if --quiet has been specified; it is False if the result has been # explicitly excluded via the -y/-x options. if r.valid and r.extract and r.display and (not self.max_count or self.extraction_count < self.max_count): # Create some extract output for this file, it it doesn't already exist if not binwalk.core.common.has_key(self.output, r.file.path): self.output[r.file.path] = ExtractInfo() # Attempt extraction binwalk.core.common.debug("Extractor callback for %s @%d [%s]" % (r.file.name, r.offset, r.description)) (extraction_directory, dd_file, scan_extracted_files) = self.extract(r.offset, r.description, r.file.path, size, r.name) # If the extraction was successful, self.extract will have returned the output directory and name of the dd'd file if extraction_directory and dd_file: # Track the number of extracted files self.extraction_count += 1 # Get the full path to the dd'd file and save it in the output info for this file dd_file_path = os.path.join(extraction_directory, dd_file) self.output[r.file.path].carved[r.offset] = dd_file_path self.output[r.file.path].extracted[r.offset] = [] # Do a directory listing of the output directory directory_listing = set(os.listdir(extraction_directory)) # If this is a newly created output directory, self.last_directory_listing won't have a record of it. # If we've extracted other files to this directory before, it will. if not has_key(self.last_directory_listing, extraction_directory): self.last_directory_listing[extraction_directory] = set() # Loop through a list of newly created files (i.e., files that weren't listed in the last directory listing) for f in directory_listing.difference(self.last_directory_listing[extraction_directory]): # Build the full file path and add it to the extractor results file_path = os.path.join(extraction_directory, f) real_file_path = os.path.realpath(file_path) self.result(description=file_path, display=False) # Also keep a list of files created by the extraction utility if real_file_path != dd_file_path: self.output[r.file.path].extracted[r.offset].append(real_file_path) # If recursion was specified, and the file is not the same one we just dd'd if (self.matryoshka and file_path != dd_file_path and scan_extracted_files and self.directory in real_file_path): # If the recursion level of this file is less than or equal to our desired recursion level if len(real_file_path.split(self.directory)[1].split(os.path.sep)) <= self.matryoshka: # If this is a directory and we are supposed to process directories for this extractor, # then add all files under that directory to the list of pending files. if os.path.isdir(file_path): for root, dirs, files in os.walk(file_path): for f in files: full_path = os.path.join(root, f) self.add_pending(full_path) # If it's just a file, it to the list of pending files else: self.add_pending(file_path) # Update the last directory listing for the next time we extract a file to this same output directory self.last_directory_listing[extraction_directory] = directory_listing
KeyboardInterrupt
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/extractor.py/Extractor.callback
7,429
def add_rule(self, txtrule=None, regex=None, extension=None, cmd=None, codes=[0, None], recurse=True): ''' Adds a set of rules to the extraction rule list. @txtrule - Rule string, or list of rule strings, in the format <regular expression>:<file extension>[:<command to run>] @regex - If rule string is not specified, this is the regular expression string to use. @extension - If rule string is not specified, this is the file extension to use. @cmd - If rule string is not specified, this is the command to run. Alternatively a callable object may be specified, which will be passed one argument: the path to the file to extract. @codes - A list of valid return codes for the extractor. @recurse - If False, extracted directories will not be recursed into when the matryoshka option is enabled. Returns None. ''' rules = [] match = False r = { 'extension' : '', 'cmd' : '', 'regex' : None, 'codes' : codes, 'recurse' : recurse, } # Process single explicitly specified rule if not txtrule and regex and extension: r['extension'] = extension r['regex'] = re.compile(regex) if cmd: r['cmd'] = cmd self.append_rule(r) return # Process rule string, or list of rule strings if not isinstance(txtrule, type([])): rules = [txtrule] else: rules = txtrule for rule in rules: r['cmd'] = '' r['extension'] = '' try: values = self._parse_rule(rule) match = values[0] r['regex'] = re.compile(values[0]) r['extension'] = values[1] r['cmd'] = values[2] r['codes'] = values[3] r['recurse'] = values[4] except __HOLE__ as e: raise e except Exception: pass # Verify that the match string was retrieved. if match: self.append_rule(r)
KeyboardInterrupt
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/extractor.py/Extractor.add_rule
7,430
def load_from_file(self, fname):
    '''
    Loads extraction rules from the specified file.

    @fname - Path to the extraction rule file.

    Returns None.
    '''
    try:
        # Process each line from the extract file, ignoring comments
        with open(fname, 'r') as f:
            for rule in f.readlines():
                self.add_rule(rule.split(self.COMMENT_DELIM, 1)[0])
    except __HOLE__ as e:
        raise e
    except Exception as e:
        raise Exception("Extractor.load_from_file failed to load file '%s': %s" % (fname, str(e)))
KeyboardInterrupt
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/extractor.py/Extractor.load_from_file
7,431
def load_defaults(self): ''' Loads default extraction rules from the user and system extract.conf files. Returns None. ''' # Load the user extract file first to ensure its rules take precedence. extract_files = [ self.config.settings.user.extract, self.config.settings.system.extract, ] for extract_file in extract_files: if extract_file: try: self.load_from_file(extract_file) except __HOLE__ as e: raise e except Exception as e: if binwalk.core.common.DEBUG: raise Exception("Extractor.load_defaults failed to load file '%s': %s" % (extract_file, str(e)))
KeyboardInterrupt
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/extractor.py/Extractor.load_defaults
7,432
def build_output_directory(self, path): ''' Set the output directory for extracted files. @path - The path to the file that data will be extracted from. Returns None. ''' # If we have not already created an output directory for this target file, create one now if not has_key(self.extraction_directories, path): basedir = os.path.dirname(path) basename = os.path.basename(path) # Make sure we put the initial extraction directory in the CWD if self.directory is None: self.directory = os.getcwd() if basedir != self.directory: # During recursive extraction, extracted files will be in subdirectories # of the CWD. This allows us to figure out the subdirectory by simply # splitting the target file's base directory on our known CWD. # # However, the very *first* file being scanned is not necessarily in the # CWD, so this will raise an IndexError. This is easy to handle though, # since the very first file being scanned needs to have its contents # extracted to ${CWD}/_basename.extracted, so we just set the subdir # variable to a blank string when an IndexError is encountered. try: subdir = basedir.split(self.directory)[1][1:] except __HOLE__ as e: subdir = "" else: subdir = "" if self.output_directory_override: output_directory = os.path.join(self.directory, subdir, self.output_directory_override) else: outdir = os.path.join(self.directory, subdir, '_' + basename) output_directory = unique_file_name(outdir, extension='extracted') if not os.path.exists(output_directory): os.mkdir(output_directory) self.extraction_directories[path] = output_directory self.output[path].directory = os.path.realpath(output_directory) + os.path.sep # Else, just use the already created directory else: output_directory = self.extraction_directories[path] return output_directory
IndexError
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/extractor.py/Extractor.build_output_directory
7,433
def extract(self, offset, description, file_name, size, name=None): ''' Extract an embedded file from the target file, if it matches an extract rule. Called automatically by Binwalk.scan(). @offset - Offset inside the target file to begin the extraction. @description - Description of the embedded file to extract, as returned by libmagic. @file_name - Path to the target file. @size - Number of bytes to extract. @name - Name to save the file as. Returns the name of the extracted file (blank string if nothing was extracted). ''' fname = '' recurse = False original_dir = os.getcwd() rules = self.match(description) file_path = os.path.realpath(file_name) # No extraction rules for this file if not rules: return (None, None, False) else: binwalk.core.common.debug("Found %d matching extraction rules" % len(rules)) # Generate the output directory name where extracted files will be stored output_directory = self.build_output_directory(file_name) # Extract to end of file if no size was specified if not size: size = file_size(file_path) - offset if os.path.isfile(file_path): os.chdir(output_directory) # Loop through each extraction rule until one succeeds for i in range(0, len(rules)): rule = rules[i] # Make sure we don't recurse into any extracted directories if instructed not to if rule['recurse'] in [True, False]: recurse = rule['recurse'] else: recurse = True # Copy out the data to disk, if we haven't already fname = self._dd(file_path, offset, size, rule['extension'], output_file_name=name) # If there was a command specified for this rule, try to execute it. # If execution fails, the next rule will be attempted. if rule['cmd']: # Note the hash of the original file; if --rm is specified and the # extraction utility modifies the original file rather than creating # a new one (AFAIK none currently do, but could happen in the future), # we don't want to remove this file. if self.remove_after_execute: fname_md5 = file_md5(fname) # Execute the specified command against the extracted file if self.run_extractors: extract_ok = self.execute(rule['cmd'], fname, rule['codes']) else: extract_ok = True # Only clean up files if remove_after_execute was specified if extract_ok == True and self.remove_after_execute: # Remove the original file that we extracted, # if it has not been modified by the extractor. try: if file_md5(fname) == fname_md5: os.unlink(fname) except __HOLE__ as e: raise e except Exception as e: pass # If the command executed OK, don't try any more rules if extract_ok == True: break # Else, remove the extracted file if this isn't the last rule in the list. # If it is the last rule, leave the file on disk for the user to examine. elif i != (len(rules)-1): try: os.unlink(fname) except KeyboardInterrupt as e: raise e except Exception as e: pass # If there was no command to execute, just use the first rule else: break os.chdir(original_dir) return (output_directory, fname, recurse)
KeyboardInterrupt
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/extractor.py/Extractor.extract
7,434
def _parse_rule(self, rule): ''' Parses an extraction rule. @rule - Rule string. Returns an array of ['<case insensitive matching string>', '<file extension>', '<command to run>', '<comma separated return codes>', <recurse into extracted directories: True|False>]. ''' values = rule.strip().split(self.RULE_DELIM, 4) if len(values) >= 4: codes = values[3].split(',') for i in range(0, len(codes)): try: codes[i] = int(codes[i], 0) except __HOLE__ as e: binwalk.core.common.warning("The specified return code '%s' for extractor '%s' is not a valid number!" % (codes[i], values[0])) values[3] = codes if len(values) >= 5: values[4] = (values[4].lower() == 'true') return values
ValueError
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/extractor.py/Extractor._parse_rule
7,435
def _dd(self, file_name, offset, size, extension, output_file_name=None): ''' Extracts a file embedded inside the target file. @file_name - Path to the target file. @offset - Offset inside the target file where the embedded file begins. @size - Number of bytes to extract. @extension - The file exension to assign to the extracted file on disk. @output_file_name - The requested name of the output file. Returns the extracted file name. ''' total_size = 0 # Default extracted file name is <displayed hex offset>.<extension> default_bname = "%X" % (offset + self.config.base) if self.max_size and size > self.max_size: size = self.max_size if not output_file_name or output_file_name is None: bname = default_bname else: # Strip the output file name of invalid/dangerous characters (like file paths) bname = os.path.basename(output_file_name) fname = unique_file_name(bname, extension) try: # If byte swapping is enabled, we need to start reading at a swap-size # aligned offset, then index in to the read data appropriately. if self.config.swap_size: adjust = offset % self.config.swap_size else: adjust = 0 offset -= adjust # Open the target file and seek to the offset fdin = self.config.open_file(file_name) fdin.seek(offset) # Open the output file try: fdout = BlockFile(fname, 'w') except __HOLE__ as e: raise e except Exception as e: # Fall back to the default name if the requested name fails fname = unique_file_name(default_bname, extension) fdout = BlockFile(fname, 'w') while total_size < size: (data, dlen) = fdin.read_block() if not data: break else: total_size += (dlen-adjust) if total_size > size: dlen -= (total_size - size) fdout.write(str2bytes(data[adjust:dlen])) adjust = 0 # Cleanup fdout.close() fdin.close() except KeyboardInterrupt as e: raise e except Exception as e: raise Exception("Extractor.dd failed to extract data from '%s' to '%s': %s" % (file_name, fname, str(e))) binwalk.core.common.debug("Carved data block 0x%X - 0x%X from '%s' to '%s'" % (offset, offset+size, file_name, fname)) return fname
KeyboardInterrupt
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/extractor.py/Extractor._dd
7,436
def execute(self, cmd, fname, codes=[0, None]): ''' Execute a command against the specified file. @cmd - Command to execute. @fname - File to run command against. @codes - List of return codes indicating cmd success. Returns True on success, False on failure, or None if the external extraction utility could not be found. ''' tmp = None rval = 0 retval = True binwalk.core.common.debug("Running extractor '%s'" % str(cmd)) try: if callable(cmd): try: retval = cmd(fname) except __HOLE__ as e: raise e except Exception as e: binwalk.core.common.warning("Internal extractor '%s' failed with exception: '%s'" % (str(cmd), str(e))) elif cmd: # If not in debug mode, create a temporary file to redirect stdout and stderr to if not binwalk.core.common.DEBUG: tmp = tempfile.TemporaryFile() # Execute. for command in cmd.split("&&"): # Generate unique file paths for all paths in the current command that are surrounded by UNIQUE_PATH_DELIMITER while self.UNIQUE_PATH_DELIMITER in command: need_unique_path = command.split(self.UNIQUE_PATH_DELIMITER)[1].split(self.UNIQUE_PATH_DELIMITER)[0] unique_path = binwalk.core.common.unique_file_name(need_unique_path) command = command.replace(self.UNIQUE_PATH_DELIMITER + need_unique_path + self.UNIQUE_PATH_DELIMITER, unique_path) # Replace all instances of FILE_NAME_PLACEHOLDER in the command with fname command = command.strip().replace(self.FILE_NAME_PLACEHOLDER, fname) binwalk.core.common.debug("subprocess.call(%s, stdout=%s, stderr=%s)" % (command, str(tmp), str(tmp))) rval = subprocess.call(shlex.split(command), stdout=tmp, stderr=tmp) if rval in codes: retval = True else: retval = False binwalk.core.common.debug('External extractor command "%s" completed with return code %d (success: %s)' % (cmd, rval, str(retval))) # TODO: Should errors from all commands in a command string be checked? Currently we only support # specifying one set of error codes, so at the moment, this is not done; it is up to the # final command to return success or failure (which presumably it will if previous necessary # commands were not successful, but this is an assumption). #if retval == False: # break except KeyboardInterrupt as e: raise e except Exception as e: binwalk.core.common.warning("Extractor.execute failed to run external extractor '%s': %s" % (str(cmd), str(e))) retval = None if tmp is not None: tmp.close() return retval
KeyboardInterrupt
dataset/ETHPy150Open devttys0/binwalk/src/binwalk/modules/extractor.py/Extractor.execute
7,437
def smisk_mvc_metadata(conf): '''This config filter configures the underlying Elixir and SQLAlchemy modules. ''' global log conf = conf.get('smisk.mvc.model') if not conf: return # Aquire required parameter "url" try: url = conf['url'] except __HOLE__: log.warn('missing required "url" parameter in "smisk.mvc.model" config') return # Parse url into an accessible structure from smisk.core import URL, Application url_st = URL(url) # Make a copy of the default options engine_opts = default_engine_opts.copy() # MySQL if url_st.scheme.lower() == 'mysql': if 'poolclass' not in conf: conf['poolclass'] = MySQLConnectionPool log.debug('MySQL: setting poolclass=%r', conf['poolclass']) if 'pool_size' in conf: log.debug('MySQL: disabling pool_size') del conf['pool_size'] if 'pool_size' in engine_opts: del engine_opts['pool_size'] elif 'pool_recycle' not in conf and 'pool_recycle' not in engine_opts: # In case of user-configured custom pool_class conf['pool_recycle'] = 3600 log.debug('MySQL: setting pool_recycle=%r', conf['pool_recycle']) elif 'poolclass' not in conf: # Others than MySQL should also use a kind of static pool conf['poolclass'] = SingleProcessPool # Demux configuration elixir_opts = {} for k,v in conf.items(): if k.startswith('elixir.'): elixir_opts[k[7:]] = v elif k != 'url': engine_opts[k] = v # Apply Elixir default options if elixir_opts: log.info('applying Elixir default options %r', elixir_opts) # We apply by iteration since options_defaults is not # guaranteed to be a real dict. for k,v in elixir_opts.items(): options_defaults[k] = v # Mask out password, since we're logging this if url_st.password: url_st.password = '***' def rebind_model_metadata(): # Dispose any previous connection if metadata.bind and hasattr(metadata.bind, 'dispose'): log.debug('disposing old connection %r', metadata.bind) try: metadata.bind.dispose() except Exception, e: if e.args and e.args[0] and 'SQLite objects created in a thread' in e.args[0]: log.debug('SQLite connections can not be disposed from other threads'\ ' -- simply leaving it to the GC') else: log.warn('failed to properly dispose the connection', exc_info=True) # Create, configure and bind engine if engine_opts: log.info('binding to %r with options %r', str(url_st), engine_opts) else: log.info('binding to %r', str(url_st)) metadata.bind = sql.create_engine(url, **engine_opts) # Queue action or call it directly if hasattr(Application.current, '_pending_rebind_model_metadata'): log.info('queued pending metadata rebind') Application.current._pending_rebind_model_metadata = rebind_model_metadata else: # Run in this thread -- might cause problems with thread-local stored connections rebind_model_metadata()
KeyError
dataset/ETHPy150Open rsms/smisk/lib/smisk/mvc/model.py/smisk_mvc_metadata
7,438
def build_spider_registry(config): SPIDER_REGISTRY.clear() opt_modules = [] opt_modules = config['global'].get('spider_modules', []) for path in opt_modules: if ':' in path: path, cls_name = path.split(':') else: cls_name = None try: mod = __import__(path, None, None, ['foo']) except __HOLE__ as ex: if path not in six.text_type(ex): logging.error('', exc_info=ex) else: for key in dir(mod): if key == 'Spider': continue if cls_name is None or key == cls_name: val = getattr(mod, key) if isinstance(val, type) and issubclass(val, Spider): if val.Meta.abstract: pass else: spider_name = val.get_spider_name() logger.debug( 'Module `%s`, found spider `%s` ' 'with name `%s`' % ( path, val.__name__, spider_name)) if spider_name in SPIDER_REGISTRY: mod = SPIDER_REGISTRY[spider_name].__module__ raise SpiderInternalError( 'There are two different spiders with ' 'the same name "%s". ' 'Modules: %s and %s' % ( spider_name, mod, val.__module__)) else: SPIDER_REGISTRY[spider_name] = val return SPIDER_REGISTRY
ImportError
dataset/ETHPy150Open lorien/grab/grab/util/module.py/build_spider_registry
7,439
def _get_size(self): try: return self._size except AttributeError: try: self._size = self.file.size except __HOLE__: self._size = len(self.file.getvalue()) return self._size
AttributeError
dataset/ETHPy150Open benoitbryon/django-downloadview/django_downloadview/files.py/VirtualFile._get_size
7,440
@property def request(self): try: return self._request except __HOLE__: self._request = self.request_factory(self.url, **self.request_kwargs) return self._request
AttributeError
dataset/ETHPy150Open benoitbryon/django-downloadview/django_downloadview/files.py/HTTPFile.request
7,441
@property def file(self): try: return self._file except __HOLE__: content = self.request.iter_content(decode_unicode=False) self._file = BytesIteratorIO(content) return self._file
AttributeError
dataset/ETHPy150Open benoitbryon/django-downloadview/django_downloadview/files.py/HTTPFile.file
7,442
def __getattribute__(self, name): try: return super(PersistentData, self).__getattribute__(name) except __HOLE__, err: typ, default = self.__class__._ATTRS.get(name, (None, None)) if typ is not None: return default raise
AttributeError
dataset/ETHPy150Open kdart/pycopia/storage/pycopia/durusplus/persistent_data.py/PersistentData.__getattribute__
7,443
def _get_cache(self, name, constructor): try: return self.__dict__["_cache"][name] except __HOLE__: obj = constructor() self.__dict__["_cache"][name] = obj return obj
KeyError
dataset/ETHPy150Open kdart/pycopia/storage/pycopia/durusplus/persistent_data.py/PersistentData._get_cache
7,444
def _del_cache(self, name, destructor=None): try: obj = self.__dict__["_cache"].pop(name) except __HOLE__: return else: if destructor: destructor(obj)
KeyError
dataset/ETHPy150Open kdart/pycopia/storage/pycopia/durusplus/persistent_data.py/PersistentData._del_cache
7,445
@classmethod def clean_status_file(cls): """ Removes JMX status files """ try: os.remove(os.path.join(cls._get_dir(), cls._STATUS_FILE)) except OSError: pass try: os.remove(os.path.join(cls._get_dir(), cls._PYTHON_STATUS_FILE)) except __HOLE__: pass
OSError
dataset/ETHPy150Open serverdensity/sd-agent/utils/jmx.py/JMXFiles.clean_status_file
7,446
@classmethod def clean_exit_file(cls): """ Remove exit file trigger -may not exist-. Note: Windows only """ try: os.remove(os.path.join(cls._get_dir(), cls._JMX_EXIT_FILE)) except __HOLE__: pass
OSError
dataset/ETHPy150Open serverdensity/sd-agent/utils/jmx.py/JMXFiles.clean_exit_file
7,447
def paintContent(self, target): """Paints any needed component-specific things to the given UIDL stream. @see: L{AbstractComponent.paintContent} """ self._initialPaint = False if self._partialUpdate: target.addAttribute('partialUpdate', True) target.addAttribute('rootKey', self.itemIdMapper.key(self._expandedItemId)) else: self.getCaptionChangeListener().clear() # The tab ordering number if self.getTabIndex() > 0: target.addAttribute('tabindex', self.getTabIndex()) # Paint tree attributes if self.isSelectable(): if self.isMultiSelect(): target.addAttribute('selectmode', 'multi') else: target.addAttribute('selectmode', 'single') if self.isMultiSelect(): try: idx = MultiSelectMode.values().index( self._multiSelectMode) except ValueError: idx = -1 target.addAttribute('multiselectmode', idx) else: target.addAttribute('selectmode', 'none') if self.isNewItemsAllowed(): target.addAttribute('allownewitem', True) if self.isNullSelectionAllowed(): target.addAttribute('nullselect', True) if self._dragMode != TreeDragMode.NONE: target.addAttribute('dragMode', TreeDragMode.ordinal(self._dragMode)) # Initialize variables actionSet = OrderedSet() # rendered selectedKeys selectedKeys = list() expandedKeys = list() # Iterates through hierarchical tree using a stack of iterators iteratorStack = deque() if self._partialUpdate: ids = self.getChildren(self._expandedItemId) else: ids = self.rootItemIds() if ids is not None: iteratorStack.append( iter(ids) ) # Body actions - Actions which has the target null and can be invoked # by right clicking on the Tree body if self._actionHandlers is not None: keys = list() for ah in self._actionHandlers: # Getting action for the null item, which in this case # means the body item aa = ah.getActions(None, self) if aa is not None: for ai in range(len(aa)): akey = self._actionMapper.key(aa[ai]) actionSet.add(aa[ai]) keys.append(akey) target.addAttribute('alb', keys) while len(iteratorStack) > 0: # Gets the iterator for current tree level i = iteratorStack[-1] # peek try: # Adds the item on current level itemId = i.next() # Starts the item / node isNode = self.areChildrenAllowed(itemId) if isNode: target.startTag('node') else: target.startTag('leaf') if self._itemStyleGenerator is not None: stylename = self._itemStyleGenerator.getStyle(itemId) if stylename is not None: target.addAttribute('style', stylename) if self._itemDescriptionGenerator is not None: description = self._itemDescriptionGenerator\ .generateDescription(self, itemId, None) if description is not None and description != "": target.addAttribute("descr", description) # Adds the attributes target.addAttribute('caption', self.getItemCaption(itemId)) icon = self.getItemIcon(itemId) if icon is not None: target.addAttribute('icon', self.getItemIcon(itemId)) key = self.itemIdMapper.key(itemId) target.addAttribute('key', key) if self.isSelected(itemId): target.addAttribute('selected', True) selectedKeys.append(key) if self.areChildrenAllowed(itemId) and self.isExpanded(itemId): target.addAttribute('expanded', True) expandedKeys.append(key) # Add caption change listener self.getCaptionChangeListener().addNotifierForItem(itemId) # Actions if self._actionHandlers is not None: keys = list() ahi = iter(self._actionHandlers) while True: try: aa = ahi.next().getActions(itemId, self) if aa is not None: for ai in range(len(aa)): akey = self._actionMapper.key(aa[ai]) actionSet.add(aa[ai]) keys.append(akey) except StopIteration: break target.addAttribute('al', keys) # Adds the children if expanded, or close the tag if 
(self.isExpanded(itemId) and self.hasChildren(itemId) and self.areChildrenAllowed(itemId)): iteratorStack.append( iter(self.getChildren(itemId)) ) elif isNode: target.endTag('node') else: target.endTag('leaf') # If the level is finished, back to previous tree level except __HOLE__: # Removes used iterator from the stack iteratorStack.pop() # Closes node if len(iteratorStack) > 0: target.endTag('node') # Actions if len(actionSet) > 0: target.addVariable(self, 'action', '') target.startTag('actions') i = actionSet for a in actionSet: target.startTag('action') if a.getCaption() is not None: target.addAttribute('caption', a.getCaption()) if a.getIcon() is not None: target.addAttribute('icon', a.getIcon()) target.addAttribute('key', self._actionMapper.key(a)) target.endTag('action') target.endTag('actions') if self._partialUpdate: self._partialUpdate = False else: # Selected target.addVariable(self, 'selected', selectedKeys) # Expand and collapse target.addVariable(self, 'expand', list()) target.addVariable(self, 'collapse', list()) # New items target.addVariable(self, 'newitem', list()) if self._dropHandler is not None: self._dropHandler.getAcceptCriterion().paint(target)
StopIteration
dataset/ETHPy150Open rwl/muntjac/muntjac/ui/tree.py/Tree.paintContent
7,448
def getVisibleItemIds(self): """Gets the visible item ids. @see: L{Select.getVisibleItemIds} """ visible = list() # Iterates trough hierarchical tree using a stack of iterators iteratorStack = deque() ids = self.rootItemIds() if ids is not None: iteratorStack.append(ids) while len(iteratorStack) > 0: # Gets the iterator for current tree level i = iter( iteratorStack[-1] ) # If the level is finished, back to previous tree level try: itemId = i.next() visible.append(itemId) # Adds children if expanded, or close the tag if self.isExpanded(itemId) and self.hasChildren(itemId): iteratorStack.append( self.getChildren(itemId) ) except __HOLE__: # Removes used iterator from the stack # Adds the item on current level iteratorStack.pop() return visible
StopIteration
dataset/ETHPy150Open rwl/muntjac/muntjac/ui/tree.py/Tree.getVisibleItemIds
7,449
def clean(self, value): from .us_states import STATES_NORMALIZED super(USStateField, self).clean(value) if value in EMPTY_VALUES: return '' try: value = value.strip().lower() except __HOLE__: pass else: try: return STATES_NORMALIZED[value.strip().lower()] except KeyError: pass raise ValidationError(self.error_messages['invalid'])
AttributeError
dataset/ETHPy150Open django/django-localflavor/localflavor/us/forms.py/USStateField.clean
7,450
def __get__(self, instance, instance_type=None): if instance is None: return self try: return getattr(instance, self.cache_attr) except __HOLE__: rel_obj = None # Make sure to use ContentType.objects.get_for_id() to ensure that # lookups are cached (see ticket #5570). This takes more code than # the naive ``getattr(instance, self.ct_field)``, but has better # performance when dealing with GFKs in loops and such. f = self.model._meta.get_field(self.ct_field) ct_id = getattr(instance, f.get_attname(), None) if ct_id: ct = self.get_content_type(id=ct_id, using=instance._state.db) try: rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field)) except ObjectDoesNotExist: pass setattr(instance, self.cache_attr, rel_obj) return rel_obj
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/contrib/contenttypes/generic.py/GenericForeignKey.__get__
7,451
def create_generic_related_manager(superclass): """ Factory function for a manager that subclasses 'superclass' (which is a Manager) and adds behavior for generic related objects. """ class GenericRelatedObjectManager(superclass): def __init__(self, model=None, instance=None, symmetrical=None, source_col_name=None, target_col_name=None, content_type=None, content_type_field_name=None, object_id_field_name=None, prefetch_cache_name=None): super(GenericRelatedObjectManager, self).__init__() self.model = model self.content_type = content_type self.symmetrical = symmetrical self.instance = instance self.source_col_name = source_col_name self.target_col_name = target_col_name self.content_type_field_name = content_type_field_name self.object_id_field_name = object_id_field_name self.prefetch_cache_name = prefetch_cache_name self.pk_val = self.instance._get_pk_val() self.core_filters = { '%s__pk' % content_type_field_name: content_type.id, '%s__exact' % object_id_field_name: instance._get_pk_val(), } def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, __HOLE__): db = self._db or router.db_for_read(self.model, instance=self.instance) return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters) def get_prefetch_queryset(self, instances): db = self._db or router.db_for_read(self.model, instance=instances[0]) query = { '%s__pk' % self.content_type_field_name: self.content_type.id, '%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances) } qs = super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**query) # We (possibly) need to convert object IDs to the type of the # instances' PK in order to match up instances: object_id_converter = instances[0]._meta.pk.to_python return (qs, lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)), lambda obj: obj._get_pk_val(), False, self.prefetch_cache_name) def add(self, *objs): for obj in objs: if not isinstance(obj, self.model): raise TypeError("'%s' instance expected" % self.model._meta.object_name) setattr(obj, self.content_type_field_name, self.content_type) setattr(obj, self.object_id_field_name, self.pk_val) obj.save() add.alters_data = True def remove(self, *objs): db = router.db_for_write(self.model, instance=self.instance) for obj in objs: obj.delete(using=db) remove.alters_data = True def clear(self): db = router.db_for_write(self.model, instance=self.instance) for obj in self.all(): obj.delete(using=db) clear.alters_data = True def create(self, **kwargs): kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super(GenericRelatedObjectManager, self).using(db).create(**kwargs) create.alters_data = True return GenericRelatedObjectManager
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/contrib/contenttypes/generic.py/create_generic_related_manager
7,452
def get_formatter(format): """Returns the formatter for the given format""" try: return _FORMATTERS_REGISTER[format] except __HOLE__: raise RuntimeError('No formatter registered for format "%s"' % format)
KeyError
dataset/ETHPy150Open fiam/wapi/formatters/__init__.py/get_formatter
7,453
def getid(obj): """Return id if argument is a Resource. Abstracts the common pattern of allowing both an object or an object's ID (UUID) as a parameter when dealing with relationships. """ try: if obj.uuid: return obj.uuid except __HOLE__: pass try: return obj.id except AttributeError: return obj # TODO(aababilov): call run_hooks() in HookableMixin's child classes
AttributeError
dataset/ETHPy150Open openstack/python-solumclient/solumclient/openstack/common/apiclient/base.py/getid
7,454
def _list(self, url, response_key=None, obj_class=None, json=None): """List the collection. :param url: a partial URL, e.g., '/servers' :param response_key: the key to be looked up in response dictionary, e.g., 'servers'. If response_key is None - all response body will be used. :param obj_class: class for constructing the returned objects (self.resource_class will be used by default) :param json: data that will be encoded as JSON and passed in POST request (GET will be sent by default) """ if json: body = self.client.post(url, json=json).json() else: body = self.client.get(url).json() if obj_class is None: obj_class = self.resource_class data = body[response_key] if response_key is not None else body # NOTE(ja): keystone returns values as list as {'values': [ ... ]} # unlike other services which just return the list... try: data = data['values'] except (KeyError, __HOLE__): pass return [obj_class(self, res, loaded=True) for res in data if res]
TypeError
dataset/ETHPy150Open openstack/python-solumclient/solumclient/openstack/common/apiclient/base.py/BaseManager._list
7,455
def findall(self, **kwargs): """Find all items with attributes matching ``**kwargs``. This isn't very efficient: it loads the entire list then filters on the Python side. """ found = [] searches = kwargs.items() for obj in self.list(): try: if all(getattr(obj, attr) == value for (attr, value) in searches): found.append(obj) except __HOLE__: continue return found
AttributeError
dataset/ETHPy150Open openstack/python-solumclient/solumclient/openstack/common/apiclient/base.py/ManagerWithFind.findall
7,456
def _parse_extension_module(self): self.manager_class = None for attr_name, attr_value in self.module.__dict__.items(): if attr_name in self.SUPPORTED_HOOKS: self.add_hook(attr_name, attr_value) else: try: if issubclass(attr_value, BaseManager): self.manager_class = attr_value except __HOLE__: pass
TypeError
dataset/ETHPy150Open openstack/python-solumclient/solumclient/openstack/common/apiclient/base.py/Extension._parse_extension_module
7,457
def _add_details(self, info): for (k, v) in six.iteritems(info): try: setattr(self, k, v) self._info[k] = v except __HOLE__: # In this case we already defined the attribute on the class pass
AttributeError
dataset/ETHPy150Open openstack/python-solumclient/solumclient/openstack/common/apiclient/base.py/Resource._add_details
7,458
def __init__(self): cmd.Cmd.__init__(self) self.logged_in = threading.Event() self.logged_out = threading.Event() self.logged_out.set() self.session = spotify.Session() self.session.on( spotify.SessionEvent.CONNECTION_STATE_UPDATED, self.on_connection_state_changed) self.session.on( spotify.SessionEvent.END_OF_TRACK, self.on_end_of_track) try: self.audio_driver = spotify.AlsaSink(self.session) except __HOLE__: self.logger.warning( 'No audio sink found; audio playback unavailable.') self.event_loop = spotify.EventLoop(self.session) self.event_loop.start()
ImportError
dataset/ETHPy150Open mopidy/pyspotify/examples/shell.py/Commander.__init__
7,459
def do_play_uri(self, line): "play <spotify track uri>" if not self.logged_in.is_set(): self.logger.warning('You must be logged in to play') return try: track = self.session.get_track(line) track.load() except (__HOLE__, spotify.Error) as e: self.logger.warning(e) return self.logger.info('Loading track into player') self.session.player.load(track) self.logger.info('Playing track') self.session.player.play()
ValueError
dataset/ETHPy150Open mopidy/pyspotify/examples/shell.py/Commander.do_play_uri
7,460
def reset(self): codecs.StreamReader.reset(self) try: del self.decode except __HOLE__: pass
AttributeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/encodings/utf_16.py/StreamReader.reset
7,461
def get_status(self, xmlresult, xml): """Gets the status code of T1 XML. If code is valid, returns None; otherwise raises the appropriate Error. """ status = xmlresult.find('status') if status is None: raise T1Error(None, xml) status_code = status.attrib['code'] message = status.text try: exc = STATUS_CODES[status_code] except __HOLE__: self.status_code = False raise T1Error(status_code, message) if exc is None: self.status_code = True return self.status_code = False if exc is True: message = self._parse_field_error(xmlresult) exc = ValidationError raise exc(code=status_code, content=message)
KeyError
dataset/ETHPy150Open MediaMath/t1-python/terminalone/xmlparser.py/XMLParser.get_status
7,462
def main(sysargs=None): from bloom.config import upconvert_bloom_to_config_branch upconvert_bloom_to_config_branch() parser = get_argument_parser() parser = add_global_arguments(parser) args = parser.parse_args(sysargs) handle_global_arguments(args) # Check that the current directory is a serviceable git/bloom repo try: ensure_clean_working_env() ensure_git_root() except __HOLE__: parser.print_usage() raise git_clone = GitClone() with git_clone: import_upstream( args.archive_path, args.patches_path, args.release_version, args.name, args.replace) git_clone.commit() info("I'm happy. You should be too.")
SystemExit
dataset/ETHPy150Open ros-infrastructure/bloom/bloom/commands/git/import_upstream.py/main
7,463
def __init__(self, module): self.host = module.params["host"] self.user = module.params["user"] self.password = module.params["password"] self.state = module.params["state"] try: self.payload = json.loads(module.params.get("payload")) except __HOLE__: self.payload = '' self.resource_id = module.params.get("resource_id") self.resource_key = module.params.get("resource_key") self.collection_path = module.params["collection_path"] self.hosturl = "https://%s" % self.host self.auth = (self.user, self.password)
TypeError
dataset/ETHPy150Open F5Networks/aws-deployments/src/f5_aws/test/manual_bigip_config.py/BigipConfig.__init__
7,464
def http(self, method, host, payload=''): print 'HTTP %s %s: %s' % (method, host, payload) methodfn = getattr(requests, method.lower(), None) if method is None: raise NotImplementedError("requests module has not method %s " % method) try: if payload != '': request = methodfn(url='%s/%s' % (self.hosturl, host), data=json.dumps(payload), auth=self.auth, verify=False) else: request = methodfn(url='%s/%s' % (self.hosturl, host), auth=self.auth, verify=False) if request.status_code != requests.codes.ok: request.raise_for_status() rc = 0 out = json.loads(request.text) err = '' except (ConnectionError, __HOLE__, Timeout, TooManyRedirects) as e: rc = 1 out = '' err = '%s. Error received: %s.\n Sent request: %s' % ( e.message, json.loads(request.text), 'HTTP %s %s: %s' % (method, host, payload)) print 'HTTP %s returned: %s' % (method, request.text) return (rc, out, err)
HTTPError
dataset/ETHPy150Open F5Networks/aws-deployments/src/f5_aws/test/manual_bigip_config.py/BigipConfig.http
7,465
def peek_last(self): try: return self[0] except __HOLE__: return None
IndexError
dataset/ETHPy150Open wiliamsouza/hystrix-py/hystrix/rolling_number.py/BucketCircular.peek_last
7,466
def _read_metadata(self): """Read metadata for current frame and return as dict""" try: tags = self.im.tag_v2 # for Pillow >= v3.0.0 except AttributeError: tags = self.im.tag # for Pillow < v3.0.0 md = {} try: md["ImageDescription"] = tags[270] except __HOLE__: pass try: md["DateTime"] = _tiff_datetime(tags[306]) except KeyError: pass try: md["Software"] = tags[305] except KeyError: pass try: md["DocumentName"] = tags[269] except KeyError: pass return md
KeyError
dataset/ETHPy150Open soft-matter/pims/pims/tiff_stack.py/TiffStack_pil._read_metadata
7,467
def __init__(self, filename = "", sid_params = {}, force_read_timestamp = False): """Two ways to create a SIDfile: 1) A file already exists and you want to read it: use 'filename' 2) A new empty file needs to be created: use 'sid_params' to indicate the parameters of the file's header. The dictionary retrieved from a config file can be used. Usually this means you need to write that file after data collection. Note: only one or the other parameter should be given. If both are given then 'filename' is taken and 'sid_params' is ignored. """ self.version = "1.4 20150801" self.filename = filename self.sid_params = sid_params # dictionary of all header pairs self.is_extended = False self.timestamp_format = SidFile._TIMESTAMP_STANDARD if filename: # Read all lines in a buffer used by 'read_data' and 'read_header' try: with open(self.filename, "rt") as fin: self.lines = fin.readlines() except __HOLE__ as why: print("Error reading", filename) print(str(why)) exit(1) self.read_header() self.read_timestamp_format() self.control_header() self.read_data(force_read_timestamp) elif self.sid_params: # create zeroes numpy arrays to receive data self.control_header() self.clear_buffer() ## ## Read a SID File and control header's consistency ##
IOError
dataset/ETHPy150Open ericgibert/supersid/supersid/sidfile.py/SidFile.__init__
7,468
def read_timestamp_format(self): """Check the timestamp found on the first line to deduce the timestamp format""" first_data_line = self.lines[self.headerNbLines].split(",") if ':' in first_data_line[0]: # yes, a time stamp is found in the first data column try: datetime.strptime(first_data_line[0], SidFile._TIMESTAMP_EXTENDED) self.is_extended = True SidFile._timestamp_format = SidFile._TIMESTAMP_EXTENDED self.timestamp_format = SidFile._TIMESTAMP_EXTENDED except __HOLE__: datetime.strptime(first_data_line[0], SidFile._TIMESTAMP_STANDARD) self.is_extended = False SidFile._timestamp_format = SidFile._TIMESTAMP_STANDARD self.timestamp_format = SidFile._TIMESTAMP_STANDARD
ValueError
dataset/ETHPy150Open ericgibert/supersid/supersid/sidfile.py/SidFile.read_timestamp_format
7,469
@classmethod def _StringToDatetime(cls, strTimestamp): if type(strTimestamp) is not str: # i.e. byte array in Python 3 strTimestamp = strTimestamp.decode('utf-8') try: dts = datetime.strptime(strTimestamp, SidFile._timestamp_format) except __HOLE__: # try the other format... if SidFile._timestamp_format == SidFile._TIMESTAMP_STANDARD: dts = datetime.strptime(strTimestamp, SidFile._TIMESTAMP_EXTENDED) else: dts = datetime.strptime(strTimestamp, SidFile._TIMESTAMP_STANDARD) return dts
ValueError
dataset/ETHPy150Open ericgibert/supersid/supersid/sidfile.py/SidFile._StringToDatetime
7,470
def get_station_data(self, stationId): """Return the numpy array of the given station's data""" try: idx = self.get_station_index(stationId) return self.data[idx] except __HOLE__: return []
ValueError
dataset/ETHPy150Open ericgibert/supersid/supersid/sidfile.py/SidFile.get_station_data
7,471
def copy_data(self, second_sidfile): """Copy the second_sidfile's data on the current data vector for every common stations. If a copy is done then the timestamps are also copied.""" has_copied = False for iStation, station in enumerate(self.stations): try: second_idx = second_sidfile.get_station_index(station) self.data[iStation] = second_sidfile.data[second_idx][:] # deep copy has_copied = True except __HOLE__: # missing station in the second file pass if has_copied: self.timestamp = second_sidfile.timestamp[:] # deep copy ## ## Write a SID File ##
ValueError
dataset/ETHPy150Open ericgibert/supersid/supersid/sidfile.py/SidFile.copy_data
7,472
def __delitem__(self, k): """ C{del dirdbm[foo]} Delete a file in this directory. @type k: str @param k: key to delete @raise KeyError: Raised when there is no such key """ assert type(k) == types.StringType, "DirDBM key must be a string" k = self._encode(k) try: os.remove(os.path.join(self.dname, k)) except (__HOLE__, IOError): raise KeyError(self._decode(k))
OSError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/persisted/dirdbm.py/DirDBM.__delitem__
7,473
def respond(self, trans=None): ## CHEETAH: main method generated for this template if (not trans and not self._CHEETAH__isBuffering and not hasattr(self.transaction, '__call__')): trans = self.transaction # is None unless self.awake() was called if not trans: trans = DummyTransaction() _dummyTrans = True else: _dummyTrans = False write = trans.response().write SL = self._CHEETAH__searchList _filter = self._CHEETAH__currentFilter ######################################## ## START - generated method body ## START CACHE REGION: ID=header. line 6, col 1 in the source. _RECACHE_header = False _cacheRegion_header = self.getCacheRegion(regionID='header', cacheInfo={'type': 2, 'id': 'header'}) if _cacheRegion_header.isNew(): _RECACHE_header = True _cacheItem_header = _cacheRegion_header.getCacheItem('header') if _cacheItem_header.hasExpired(): _RECACHE_header = True if (not _RECACHE_header) and _cacheItem_header.getRefreshTime(): try: _output = _cacheItem_header.renderOutput() except __HOLE__: _RECACHE_header = True else: write(_output) del _output if _RECACHE_header or not _cacheItem_header.getRefreshTime(): _orig_transheader = trans trans = _cacheCollector_header = DummyTransaction() write = _cacheCollector_header.response().write _v = VFFSL(SL, "docType", True) # '$docType' on line 7, col 1 if _v is not None: write(_filter(_v, rawExpr='$docType')) # from line 7, col 1. write('\n') _v = VFFSL(SL, "htmlTag", True) # '$htmlTag' on line 8, col 1 if _v is not None: write(_filter(_v, rawExpr='$htmlTag')) # from line 8, col 1. write(''' <!-- This document was autogenerated by Cheetah(http://CheetahTemplate.org). Do not edit it directly! Copyright ''') _v = VFFSL(SL, "currentYr", True) # '$currentYr' on line 12, col 11 if _v is not None: write(_filter(_v, rawExpr='$currentYr')) # from line 12, col 11. write(' - ') _v = VFFSL(SL, "siteCopyrightName", True) # '$siteCopyrightName' on line 12, col 24 if _v is not None: write(_filter(_v, rawExpr='$siteCopyrightName')) # from line 12, col 24. write(' - All Rights Reserved.\nFeel free to copy any javascript or html you like on this site,\nprovided you remove all links and/or references to ') _v = VFFSL(SL, "siteDomainName", True) # '$siteDomainName' on line 14, col 52 if _v is not None: write(_filter(_v, rawExpr='$siteDomainName')) # from line 14, col 52. write(''' However, please do not copy any content or images without permission. ''') _v = VFFSL(SL, "siteCredits", True) # '$siteCredits' on line 17, col 1 if _v is not None: write(_filter(_v, rawExpr='$siteCredits')) # from line 17, col 1. write(''' --> ''') self.writeHeadTag(trans=trans) write('\n') trans = _orig_transheader write = trans.response().write _cacheData = _cacheCollector_header.response().getvalue() _cacheItem_header.setData(_cacheData) write(_cacheData) del _cacheData del _cacheCollector_header del _orig_transheader ## END CACHE REGION: header write('\n') _v = VFFSL(SL, "bodyTag", True) # '$bodyTag' on line 34, col 1 if _v is not None: write(_filter(_v, rawExpr='$bodyTag')) # from line 34, col 1. write('\n\n') self.writeBody(trans=trans) write(''' </body> </html> ''') ######################################## ## END - generated method body return _dummyTrans and trans.response().getvalue() or "" ################################################## ## CHEETAH GENERATED ATTRIBUTES
KeyError
dataset/ETHPy150Open binhex/moviegrabber/lib/site-packages/Cheetah/Templates/SkeletonPage.py/SkeletonPage.respond
7,474
def deepcopy(x): """Deep copy operation on gyp objects such as strings, ints, dicts and lists. More than twice as fast as copy.deepcopy but much less generic.""" try: return _deepcopy_dispatch[type(x)](x) except __HOLE__: raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy ' + 'or expand simple_copy support.' % type(x))
KeyError
dataset/ETHPy150Open adblockplus/gyp/pylib/gyp/simple_copy.py/deepcopy
7,475
def softspace(file, newvalue): oldvalue = 0 try: oldvalue = file.softspace except AttributeError: pass try: file.softspace = newvalue except (__HOLE__, TypeError): # "attribute-less object" or "read-only attributes" pass return oldvalue
AttributeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/code.py/softspace
7,476
def runsource(self, source, filename="<input>", symbol="single"): """Compile and run some source in the interpreter. Arguments are as for compile_command(). One several things can happen: 1) The input is incorrect; compile_command() raised an exception (SyntaxError or OverflowError). A syntax traceback will be printed by calling the showsyntaxerror() method. 2) The input is incomplete, and more input is required; compile_command() returned None. Nothing happens. 3) The input is complete; compile_command() returned a code object. The code is executed by calling self.runcode() (which also handles run-time exceptions, except for SystemExit). The return value is True in case 2, False in the other cases (unless an exception is raised). The return value can be used to decide whether to use sys.ps1 or sys.ps2 to prompt the next line. """ try: code = self.compile(source, filename, symbol) except (OverflowError, SyntaxError, __HOLE__): # Case 1 self.showsyntaxerror(filename) return False if code is None: # Case 2 return True # Case 3 self.runcode(code) return False
ValueError
dataset/ETHPy150Open babble/babble/include/jython/Lib/code.py/InteractiveInterpreter.runsource
7,477
def runcode(self, code): """Execute a code object. When an exception occurs, self.showtraceback() is called to display a traceback. All exceptions are caught except SystemExit, which is reraised. A note about KeyboardInterrupt: this exception may occur elsewhere in this code, and may not always be caught. The caller should be prepared to deal with it. """ try: exec code in self.locals except __HOLE__: raise except: self.showtraceback() else: if softspace(sys.stdout, 0): print
SystemExit
dataset/ETHPy150Open babble/babble/include/jython/Lib/code.py/InteractiveInterpreter.runcode
7,478
def interact(self, banner=None): """Closely emulate the interactive Python console. The optional banner argument specify the banner to print before the first interaction; by default it prints a banner similar to the one printed by the real Python interpreter, followed by the current class name in parentheses (so as not to confuse this with the real interpreter -- since it's so close!). """ try: sys.ps1 except __HOLE__: sys.ps1 = ">>> " try: sys.ps2 except AttributeError: sys.ps2 = "... " cprt = 'Type "help", "copyright", "credits" or "license" for more information.' if banner is None: self.write("Python %s on %s\n%s\n(%s)\n" % (sys.version, sys.platform, cprt, self.__class__.__name__)) else: self.write("%s\n" % str(banner)) more = 0 while 1: try: if more: prompt = sys.ps2 else: prompt = sys.ps1 try: line = self.raw_input(prompt) except EOFError: self.write("\n") break else: more = self.push(line) except KeyboardInterrupt: self.write("\nKeyboardInterrupt\n") self.resetbuffer() more = 0
AttributeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/code.py/InteractiveConsole.interact
7,479
def interact(banner=None, readfunc=None, local=None): """Closely emulate the interactive Python interpreter. This is a backwards compatible interface to the InteractiveConsole class. When readfunc is not specified, it attempts to import the readline module to enable GNU readline if it is available. Arguments (all optional, all default to None): banner -- passed to InteractiveConsole.interact() readfunc -- if not None, replaces InteractiveConsole.raw_input() local -- passed to InteractiveInterpreter.__init__() """ console = InteractiveConsole(local) if readfunc is not None: console.raw_input = readfunc else: try: import readline except __HOLE__: pass console.interact(banner)
ImportError
dataset/ETHPy150Open babble/babble/include/jython/Lib/code.py/interact
7,480
def __getitem__(self, key): key = key.lower() try: return self.base_data_types_reverse[key] except __HOLE__: size = get_field_size(key) if size is not None: return ('CharField', {'max_length': size}) raise KeyError
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/db/backends/sqlite3/introspection.py/FlexibleFieldLookupDict.__getitem__
7,481
def create_driver_settings(self, machine, pulse_ms=None, **kwargs): return_dict = dict() if pulse_ms is None: pulse_ms = machine.config['mpf']['default_pulse_ms'] try: return_dict['allow_enable'] = kwargs['allow_enable'] except __HOLE__: return_dict['allow_enable'] = False return_dict['pulse_ms'] = Util.int_to_hex_string(pulse_ms) return_dict['pwm1'] = 'ff' return_dict['pwm2'] = 'ff' return_dict['recycle_ms'] = '00' return return_dict
KeyError
dataset/ETHPy150Open missionpinball/mpf/mpf/platform/fast.py/FASTDriver.create_driver_settings
7,482
def update(self, data): try: self.dmd_frame = bytearray(data) except __HOLE__: pass
TypeError
dataset/ETHPy150Open missionpinball/mpf/mpf/platform/fast.py/FASTDMD.update
7,483
def identify_connection(self): """Identifies which processor this serial connection is talking to.""" # keep looping and wait for an ID response msg = '' while True: self.platform.log.debug("Sending 'ID:' command to port '%s'", self.serial_connection.name) self.serial_connection.write('ID:\r') msg = self.serial_io.readline() # todo timeout if msg.startswith('ID:'): break # examples of ID responses # ID:DMD FP-CPU-002-1 00.87 # ID:NET FP-CPU-002-2 00.85 # ID:RGB FP-CPU-002-2 00.85 try: self.remote_processor, self.remote_model, self.remote_firmware = ( msg[3:].split()) except __HOLE__: self.remote_processor, self.remote_model, = msg[3:].split() self.platform.log.info("Received ID acknowledgement. Processor: %s, " "Board: %s, Firmware: %s", self.remote_processor, self.remote_model, self.remote_firmware) if self.remote_processor == 'DMD': min_version = DMD_MIN_FW latest_version = DMD_LATEST_FW self.dmd = True elif self.remote_processor == 'NET': min_version = NET_MIN_FW latest_version = NET_LATEST_FW else: min_version = RGB_MIN_FW latest_version = RGB_LATEST_FW if StrictVersion(min_version) > StrictVersion(self.remote_firmware): self.platform.log.critical("Firmware version mismatch. MPF requires" " the %s processor to be firmware %s, but yours is %s", self.remote_processor, min_version, self.remote_firmware) sys.exit() if self.remote_processor == 'NET' and self.platform.machine_type == 'fast': self.query_fast_io_boards()
ValueError
dataset/ETHPy150Open missionpinball/mpf/mpf/platform/fast.py/SerialCommunicator.identify_connection
7,484
def _to_arrays(*args): nargs = [] single = True for a, ndim in args: try: arg = np.array(a, copy=False) except __HOLE__: # Typically end up in here when list of Shapely geometries is # passed in as input. arrays = [np.array(el, copy=False) for el in a] arg = np.array(arrays, copy=False) if arg.ndim != ndim - 1: single = False arg = _shape_func[ndim](arg) nargs.append(arg) return single, nargs
TypeError
dataset/ETHPy150Open jwass/geog/geog/geog.py/_to_arrays
7,485
def getFileByFileId(self, fileId, justPresent = True): cu = self.db.cursor() if justPresent: cu.execute("SELECT path, stream FROM DBTroveFiles " "WHERE fileId=? AND isPresent = 1", fileId) else: cu.execute("SELECT path, stream FROM DBTroveFiles " "WHERE fileId=?", fileId) # there could be multiple matches, but they should all be redundant try: path, stream = cu.next() return (path, stream) except __HOLE__: raise KeyError, fileId
StopIteration
dataset/ETHPy150Open sassoftware/conary/conary/local/sqldb.py/DBTroveFiles.getFileByFileId
7,486
def getId(self, theId, justPresent = True): cu = self.db.cursor() if justPresent: pres = "AND isPresent=1" else: pres = "" cu.execute("SELECT troveName, versionId, flavorId, isPresent " "FROM Instances WHERE instanceId=? %s" % pres, theId) try: return cu.next() except __HOLE__: raise KeyError, theId
StopIteration
dataset/ETHPy150Open sassoftware/conary/conary/local/sqldb.py/DBInstanceTable.getId
7,487
def __getitem__(self, item): cu = self.db.cursor() cu.execute("SELECT instanceId FROM Instances WHERE " "troveName=? AND versionId=? AND flavorId=?", item) try: return cu.next()[0] except __HOLE__: raise KeyError, item
StopIteration
dataset/ETHPy150Open sassoftware/conary/conary/local/sqldb.py/DBInstanceTable.__getitem__
7,488
def getVersion(self, instanceId): cu = self.db.cursor() cu.execute("""SELECT version, timeStamps FROM Instances INNER JOIN Versions ON Instances.versionId = Versions.versionId WHERE instanceId=?""", instanceId) try: (s, t) = cu.next() ts = [ float(x) for x in t.split(":") ] v = versions.VersionFromString(s, timeStamps=ts) return v except __HOLE__: raise KeyError, instanceId
StopIteration
dataset/ETHPy150Open sassoftware/conary/conary/local/sqldb.py/DBInstanceTable.getVersion
7,489
def _getTransactionCounter(self, field): """Get transaction counter Return (Boolean, value) with boolean being True if the counter was found in the table""" if 'DatabaseAttributes' not in self.db.tables: # We should already have converted the schema to have the table in # place. This may mean an update code path run with --info as # non-root (or owner of the schema) # incrementTransactionCounter should fail though. return False, 0 cu = self.db.cursor() cu.execute("SELECT value FROM DatabaseAttributes WHERE name = ?", field) try: row = cu.next() counter = row[0] except __HOLE__: return False, 0 try: counter = int(counter) except ValueError: return True, 0 return True, counter
StopIteration
dataset/ETHPy150Open sassoftware/conary/conary/local/sqldb.py/Database._getTransactionCounter
7,490
def __getattr__(self, key): try: return self[key] except __HOLE__, k: if self.has_key('_default'): return self['_default'] else: raise AttributeError, k
KeyError
dataset/ETHPy150Open FooBarWidget/mizuho/asciidoc/a2x.py/AttrDict.__getattr__
7,491
def __delattr__(self, key): try: del self[key] except __HOLE__, k: raise AttributeError, k
KeyError
dataset/ETHPy150Open FooBarWidget/mizuho/asciidoc/a2x.py/AttrDict.__delattr__
7,492
def shell(cmd, raise_error=True): ''' Execute command cmd in shell and return tuple (stdoutdata, stderrdata, returncode). If raise_error is True then a non-zero return terminates the application. ''' if os.name == 'nt': # TODO: this is probably unnecessary, see: # http://groups.google.com/group/asciidoc/browse_frm/thread/9442ee0c419f1242 # Windows doesn't like running scripts directly so explicitly # specify python interpreter. # Extract first (quoted or unquoted) argument. mo = re.match(r'^\s*"\s*(?P<arg0>[^"]+)\s*"', cmd) if not mo: mo = re.match(r'^\s*(?P<arg0>[^ ]+)', cmd) if mo.group('arg0').endswith('.py'): cmd = 'python ' + cmd # Remove redundant quoting -- this is not just cosmetic, # quoting seems to dramatically decrease the allowed command # length in Windows XP. cmd = re.sub(r'"([^ ]+?)"', r'\1', cmd) verbose('executing: %s' % cmd) if OPTIONS.dry_run: return stdout = stderr = subprocess.PIPE try: popen = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, shell=True, env=ENV) except __HOLE__, e: die('failed: %s: %s' % (cmd, e)) stdoutdata, stderrdata = popen.communicate() if OPTIONS.verbose: print stdoutdata print stderrdata if popen.returncode != 0 and raise_error: die('%s returned non-zero exit status %d' % (cmd, popen.returncode)) return (stdoutdata, stderrdata, popen.returncode)
OSError
dataset/ETHPy150Open FooBarWidget/mizuho/asciidoc/a2x.py/shell
7,493
def prompt_option(text, choices, default=NO_DEFAULT): """ Prompt the user to choose one of a list of options """ while True: for i, msg in enumerate(choices): print "[%d] %s" % (i + 1, msg) response = prompt(text, default=default) try: idx = int(response) - 1 return choices[idx] except (ValueError, __HOLE__): print "Invalid choice\n"
IndexError
dataset/ETHPy150Open mathcamp/pypicloud/pypicloud/scripts.py/prompt_option
7,494
def __getitem__(self, key): try: return self._clsmap[key] except (__HOLE__, e): if not self.initialized: self._mutex.acquire() try: if not self.initialized: self._init() self.initialized = True return self._clsmap[key] finally: self._mutex.release() raise e
KeyError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/beaker/cache.py/_backends.__getitem__
7,495
def _init(self): try: import pkg_resources # Load up the additional entry point defined backends for entry_point in pkg_resources.iter_entry_points('beaker.backends'): try: namespace_manager = entry_point.load() name = entry_point.name if name in self._clsmap: raise BeakerException("NamespaceManager name conflict,'%s' " "already loaded" % name) self._clsmap[name] = namespace_manager except (InvalidCacheBackendError, SyntaxError): # Ignore invalid backends pass except: import sys from pkg_resources import DistributionNotFound # Warn when there's a problem loading a NamespaceManager if not isinstance(sys.exc_info()[1], DistributionNotFound): import traceback from StringIO import StringIO tb = StringIO() traceback.print_exc(file=tb) warnings.warn( "Unable to load NamespaceManager " "entry point: '%s': %s" % ( entry_point, tb.getvalue()), RuntimeWarning, 2) except __HOLE__: pass # Initialize the basic available backends
ImportError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/beaker/cache.py/_backends._init
7,496
def __init__(self, namespace, type='memory', expiretime=None, starttime=None, expire=None, **nsargs): try: cls = clsmap[type] if isinstance(cls, InvalidCacheBackendError): raise cls except __HOLE__: raise TypeError("Unknown cache implementation %r" % type) self.namespace_name = namespace self.namespace = cls(namespace, **nsargs) self.expiretime = expiretime or expire self.starttime = starttime self.nsargs = nsargs
KeyError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/beaker/cache.py/Cache.__init__
7,497
@classmethod def _get_cache(cls, namespace, kw): key = namespace + str(kw) try: return cache_managers[key] except __HOLE__: cache_managers[key] = cache = cls(namespace, **kw) return cache
KeyError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/beaker/cache.py/Cache._get_cache
7,498
def authenticate(self, user, password): """ Method to get simplenote auth token Arguments: - user (string): simplenote email address - password (string): simplenote password Returns: Simplenote API token as string """ auth_params = "email=%s&password=%s" % (user, password) values = base64.encodestring(auth_params) request = Request(AUTH_URL, values) try: res = urllib2.urlopen(request).read() token = urllib2.quote(res) except __HOLE__: # no connection exception token = None return token
IOError
dataset/ETHPy150Open cpbotha/nvpy/nvpy/simplenote.py/Simplenote.authenticate
7,499
def get_note(self, noteid): """ method to get a specific note Arguments: - noteid (string): ID of the note to get Returns: A tuple `(note, status)` - note (dict): note object - status (int): 0 on sucesss and -1 otherwise """ # request note params = '/%s?auth=%s&email=%s' % (str(noteid), self.get_token(), self.username) request = Request(DATA_URL + params) try: response = urllib2.urlopen(request) except HTTPError, e: return e, -1 except __HOLE__, e: return e, -1 note = json.loads(response.read()) #use UTF-8 encoding if isinstance(note["content"], str): note["content"] = note["content"].encode('utf-8') if "tags" in note: note["tags"] = [t.encode('utf-8') if isinstance(t, str) else t for t in note["tags"]] return note, 0
IOError
dataset/ETHPy150Open cpbotha/nvpy/nvpy/simplenote.py/Simplenote.get_note