Columns:
  text          string   (lengths 89 to 104k)
  code_tokens   list
  avg_line_len  float64  (7.91 to 980)
  score         float64  (0 to 630)
def pgpdf(omega, b, psi, trunc=200):
    """
    Approximate the density PG(omega | b, psi) using a truncation
    of the density written as an infinite sum.

    :param omega: point at which to evaluate the density
    :param b: first parameter of PG
    :param psi: tilting of PG
    :param trunc: number of terms in the sum
    """
    ns = np.arange(trunc)
    psi_ns = np.array([_psi_n(omega, n, b) for n in ns])
    pdf = np.sum(psi_ns, axis=0)

    # Account for tilting
    pdf *= _tilt(omega, b, psi)
    return pdf
[ "def", "pgpdf", "(", "omega", ",", "b", ",", "psi", ",", "trunc", "=", "200", ")", ":", "ns", "=", "np", ".", "arange", "(", "trunc", ")", "psi_ns", "=", "np", ".", "array", "(", "[", "_psi_n", "(", "omega", ",", "n", ",", "b", ")", "for", "n", "in", "ns", "]", ")", "pdf", "=", "np", ".", "sum", "(", "psi_ns", ",", "axis", "=", "0", ")", "# Account for tilting", "pdf", "*=", "_tilt", "(", "omega", ",", "b", ",", "psi", ")", "return", "pdf" ]
avg_line_len: 28.111111, score: 15.222222
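A hedged usage sketch of pgpdf: _psi_n and _tilt are module helpers not shown in this snippet, so the call below only illustrates the interface, assuming numpy is imported as np.

import numpy as np

omegas = np.linspace(1e-3, 3.0, 200)   # grid of evaluation points
dens = np.array([pgpdf(w, b=1.0, psi=0.5) for w in omegas])
# A crude check: the density should integrate to roughly 1 if trunc is adequate.
print(dens.sum() * (omegas[1] - omegas[0]))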
def __choices2tkvalues(self, choices):
    """choices: iterable of (key, value) pairs"""
    values = []
    for k, v in choices:
        values.append(v)
    return values
[ "def", "__choices2tkvalues", "(", "self", ",", "choices", ")", ":", "values", "=", "[", "]", "for", "k", ",", "v", "in", "choices", ":", "values", ".", "append", "(", "v", ")", "return", "values" ]
avg_line_len: 30.833333, score: 11
def path_fraction_id_offset(points, fraction, relative_offset=False):
    '''Find the segment which corresponds to the fraction
    of the path length along the piecewise linear curve which
    is constructed from the set of points.

    Args:
        points: an iterable of indexable objects with indices
            0, 1, 2 corresponding to 3D cartesian coordinates
        fraction: path length fraction (0.0 <= fraction <= 1.0)
        relative_offset: return absolute or relative segment distance

    Returns:
        (segment ID, segment offset) pair.
    '''
    if not (0. <= fraction <= 1.0):
        raise ValueError("Invalid fraction: %.3f" % fraction)
    pts = np.array(points)[:, COLS.XYZ]
    lengths = np.linalg.norm(np.diff(pts, axis=0), axis=1)
    cum_lengths = np.cumsum(lengths)
    offset = cum_lengths[-1] * fraction

    seg_id = np.argmin(cum_lengths < offset)
    if seg_id > 0:
        offset -= cum_lengths[seg_id - 1]
    if relative_offset:
        offset /= lengths[seg_id]

    return seg_id, offset
[ "def", "path_fraction_id_offset", "(", "points", ",", "fraction", ",", "relative_offset", "=", "False", ")", ":", "if", "not", "(", "0.", "<=", "fraction", "<=", "1.0", ")", ":", "raise", "ValueError", "(", "\"Invalid fraction: %.3f\"", "%", "fraction", ")", "pts", "=", "np", ".", "array", "(", "points", ")", "[", ":", ",", "COLS", ".", "XYZ", "]", "lengths", "=", "np", ".", "linalg", ".", "norm", "(", "np", ".", "diff", "(", "pts", ",", "axis", "=", "0", ")", ",", "axis", "=", "1", ")", "cum_lengths", "=", "np", ".", "cumsum", "(", "lengths", ")", "offset", "=", "cum_lengths", "[", "-", "1", "]", "*", "fraction", "seg_id", "=", "np", ".", "argmin", "(", "cum_lengths", "<", "offset", ")", "if", "seg_id", ">", "0", ":", "offset", "-=", "cum_lengths", "[", "seg_id", "-", "1", "]", "if", "relative_offset", ":", "offset", "/=", "lengths", "[", "seg_id", "]", "return", "seg_id", ",", "offset" ]
avg_line_len: 38.461538, score: 17.230769
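A hedged example, assuming COLS.XYZ selects the first three columns (as in NeuroM); the polyline below has two unit-length segments.

import numpy as np

points = np.array([[0., 0., 0.],
                   [1., 0., 0.],
                   [2., 0., 0.]])
print(path_fraction_id_offset(points, 0.5))    # segment 0, offset 1.0
print(path_fraction_id_offset(points, 0.75,
                              relative_offset=True))   # segment 1, offset 0.5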
def getMaximinScores(profile):
    """
    Returns a dictionary that associates integer representations of each candidate with their
    maximin score.

    :ivar Profile profile: A Profile object that represents an election profile.
    """

    # Currently, we expect the profile to contain a complete ordering over candidates. Ties are
    # allowed, however.
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc":
        print("ERROR: unsupported election type")
        exit()

    wmgMap = profile.getWmg()

    # Initialize each maximin score as infinity.
    maximinscores = {}
    for cand in wmgMap.keys():
        maximinscores[cand] = float("inf")

    # For each pair of candidates, calculate the number of votes in which one beat the other.
    for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2):
        if cand2 in wmgMap[cand1].keys():
            maximinscores[cand1] = min(maximinscores[cand1], wmgMap[cand1][cand2])
            maximinscores[cand2] = min(maximinscores[cand2], wmgMap[cand2][cand1])

    return maximinscores
[ "def", "getMaximinScores", "(", "profile", ")", ":", "# Currently, we expect the profile to contain complete ordering over candidates. Ties are", "# allowed however.", "elecType", "=", "profile", ".", "getElecType", "(", ")", "if", "elecType", "!=", "\"soc\"", "and", "elecType", "!=", "\"toc\"", ":", "print", "(", "\"ERROR: unsupported election type\"", ")", "exit", "(", ")", "wmgMap", "=", "profile", ".", "getWmg", "(", ")", "# Initialize each Copeland score as infinity.", "maximinscores", "=", "{", "}", "for", "cand", "in", "wmgMap", ".", "keys", "(", ")", ":", "maximinscores", "[", "cand", "]", "=", "float", "(", "\"inf\"", ")", "# For each pair of candidates, calculate the number of votes in which one beat the other.", "# For each pair of candidates, calculate the number of times each beats the other.", "for", "cand1", ",", "cand2", "in", "itertools", ".", "combinations", "(", "wmgMap", ".", "keys", "(", ")", ",", "2", ")", ":", "if", "cand2", "in", "wmgMap", "[", "cand1", "]", ".", "keys", "(", ")", ":", "maximinscores", "[", "cand1", "]", "=", "min", "(", "maximinscores", "[", "cand1", "]", ",", "wmgMap", "[", "cand1", "]", "[", "cand2", "]", ")", "maximinscores", "[", "cand2", "]", "=", "min", "(", "maximinscores", "[", "cand2", "]", ",", "wmgMap", "[", "cand2", "]", "[", "cand1", "]", ")", "return", "maximinscores" ]
avg_line_len: 38.066667, score: 25.2
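A minimal, self-contained sketch of the maximin rule itself, on a plain dict-of-dicts weighted majority graph (toy data, not the Profile API above): each candidate's score is its worst pairwise result.

import itertools

# wmg[a][b] = number of voters preferring candidate a over candidate b
wmg = {1: {2: 6, 3: 2},
       2: {1: 4, 3: 7},
       3: {1: 8, 2: 3}}

scores = {c: float("inf") for c in wmg}
for c1, c2 in itertools.combinations(wmg, 2):
    scores[c1] = min(scores[c1], wmg[c1][c2])
    scores[c2] = min(scores[c2], wmg[c2][c1])

print(scores)  # {1: 2, 2: 4, 3: 3} -> candidate 2 wins under maximin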
def _check_smart_storage_message(self):
    """Check for smart storage message.

    :returns: result, raid_message
    """
    ssc_mesg = self.smart_storage_config_message
    result = True
    raid_message = ""
    for element in ssc_mesg:
        if "Success" not in element['MessageId']:
            result = False
            raid_message = element['MessageId']
    return result, raid_message
[ "def", "_check_smart_storage_message", "(", "self", ")", ":", "ssc_mesg", "=", "self", ".", "smart_storage_config_message", "result", "=", "True", "raid_message", "=", "\"\"", "for", "element", "in", "ssc_mesg", ":", "if", "\"Success\"", "not", "in", "element", "[", "'MessageId'", "]", ":", "result", "=", "False", "raid_message", "=", "element", "[", "'MessageId'", "]", "return", "result", ",", "raid_message" ]
avg_line_len: 33.076923, score: 10.461538
def complete_object_value(
    exe_context,  # type: ExecutionContext
    return_type,  # type: GraphQLObjectType
    field_asts,  # type: List[Field]
    info,  # type: ResolveInfo
    path,  # type: List[Union[int, str]]
    result,  # type: Any
):
    # type: (...) -> Dict[str, Any]
    """
    Complete an Object value by evaluating all sub-selections.
    """
    if return_type.is_type_of and not return_type.is_type_of(result, info):
        raise GraphQLError(
            u'Expected value of type "{}" but got: {}.'.format(
                return_type, type(result).__name__
            ),
            field_asts,
        )

    # Collect sub-fields to execute to complete this value.
    subfield_asts = exe_context.get_sub_fields(return_type, field_asts)
    return execute_fields(exe_context, return_type, result, subfield_asts, path, info)
[ "def", "complete_object_value", "(", "exe_context", ",", "# type: ExecutionContext", "return_type", ",", "# type: GraphQLObjectType", "field_asts", ",", "# type: List[Field]", "info", ",", "# type: ResolveInfo", "path", ",", "# type: List[Union[int, str]]", "result", ",", "# type: Any", ")", ":", "# type: (...) -> Dict[str, Any]", "if", "return_type", ".", "is_type_of", "and", "not", "return_type", ".", "is_type_of", "(", "result", ",", "info", ")", ":", "raise", "GraphQLError", "(", "u'Expected value of type \"{}\" but got: {}.'", ".", "format", "(", "return_type", ",", "type", "(", "result", ")", ".", "__name__", ")", ",", "field_asts", ",", ")", "# Collect sub-fields to execute to complete this value.", "subfield_asts", "=", "exe_context", ".", "get_sub_fields", "(", "return_type", ",", "field_asts", ")", "return", "execute_fields", "(", "exe_context", ",", "return_type", ",", "result", ",", "subfield_asts", ",", "path", ",", "info", ")" ]
avg_line_len: 36.130435, score: 17.608696
def validate(self, value):
    """
    Applies the validation criteria.
    Returns value, new value, or None if invalid.
    """
    try:
        coord.Angle(value, unit=self.unit)
        return value
    except ValueError:
        return None
[ "def", "validate", "(", "self", ",", "value", ")", ":", "try", ":", "coord", ".", "Angle", "(", "value", ",", "unit", "=", "self", ".", "unit", ")", "return", "value", "except", "ValueError", ":", "return", "None" ]
avg_line_len: 27.2, score: 10.8
def question_field(self, move_x, move_y):
    """Question a grid field at the given position."""
    field_status = self.info_map[move_y, move_x]

    # a questioned or undiscovered field
    if field_status != 10 and (field_status == 9 or field_status == 11):
        self.info_map[move_y, move_x] = 10
[ "def", "question_field", "(", "self", ",", "move_x", ",", "move_y", ")", ":", "field_status", "=", "self", ".", "info_map", "[", "move_y", ",", "move_x", "]", "# a questioned or undiscovered field", "if", "field_status", "!=", "10", "and", "(", "field_status", "==", "9", "or", "field_status", "==", "11", ")", ":", "self", ".", "info_map", "[", "move_y", ",", "move_x", "]", "=", "10" ]
avg_line_len: 43.857143, score: 14.142857
def DataPath():
    """ Return the default path for fit data h5 files"""
    return os.path.abspath('%s/data' % (os.path.dirname(
        os.path.realpath(__file__))))
[ "def", "DataPath", "(", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "'%s/data'", "%", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ")", ")" ]
avg_line_len: 41, score: 11
def _mount(device):
    '''
    Mount the device in a temporary place.
    '''
    dest = tempfile.mkdtemp()
    res = __states__['mount.mounted'](dest, device=device, fstype='btrfs',
                                      opts='subvol=/', persist=False)
    if not res['result']:
        log.error('Cannot mount device %s in %s', device, dest)
        _umount(dest)
        return None
    return dest
[ "def", "_mount", "(", "device", ")", ":", "dest", "=", "tempfile", ".", "mkdtemp", "(", ")", "res", "=", "__states__", "[", "'mount.mounted'", "]", "(", "dest", ",", "device", "=", "device", ",", "fstype", "=", "'btrfs'", ",", "opts", "=", "'subvol=/'", ",", "persist", "=", "False", ")", "if", "not", "res", "[", "'result'", "]", ":", "log", ".", "error", "(", "'Cannot mount device %s in %s'", ",", "device", ",", "dest", ")", "_umount", "(", "dest", ")", "return", "None", "return", "dest" ]
avg_line_len: 32.5, score: 22.166667
def switch_format(self, gsr):
    """ Convert the Wharton GSR format into the studyspaces API format. """
    if "error" in gsr:
        return gsr
    categories = {
        "cid": 1,
        "name": "Huntsman Hall",
        "rooms": []
    }

    for time in gsr["times"]:
        for entry in time:
            entry["name"] = entry["room_number"]
            del entry["room_number"]
            start_time_str = entry["start_time"]
            end_time = datetime.datetime.strptime(
                start_time_str[:-6], '%Y-%m-%dT%H:%M:%S') + datetime.timedelta(minutes=30)
            end_time_str = end_time.strftime("%Y-%m-%dT%H:%M:%S") + "-{}".format(
                self.get_dst_gmt_timezone())
            time = {
                "available": not entry["reserved"],
                "start": entry["start_time"],
                "end": end_time_str,
            }
            exists = False
            for room in categories["rooms"]:
                if room["name"] == entry["name"]:
                    room["times"].append(time)
                    exists = True
            if not exists:
                del entry["booked_by_user"]
                del entry["building"]
                if "reservation_id" in entry:
                    del entry["reservation_id"]
                entry["lid"] = 1
                entry["gid"] = 1
                entry["capacity"] = 5
                entry["room_id"] = int(entry["id"])
                del entry["id"]
                entry["times"] = [time]
                del entry["reserved"]
                del entry["end_time"]
                del entry["start_time"]
                categories["rooms"].append(entry)

    return {"categories": [categories], "rooms": categories["rooms"]}
[ "def", "switch_format", "(", "self", ",", "gsr", ")", ":", "if", "\"error\"", "in", "gsr", ":", "return", "gsr", "categories", "=", "{", "\"cid\"", ":", "1", ",", "\"name\"", ":", "\"Huntsman Hall\"", ",", "\"rooms\"", ":", "[", "]", "}", "for", "time", "in", "gsr", "[", "\"times\"", "]", ":", "for", "entry", "in", "time", ":", "entry", "[", "\"name\"", "]", "=", "entry", "[", "\"room_number\"", "]", "del", "entry", "[", "\"room_number\"", "]", "start_time_str", "=", "entry", "[", "\"start_time\"", "]", "end_time", "=", "datetime", ".", "datetime", ".", "strptime", "(", "start_time_str", "[", ":", "-", "6", "]", ",", "'%Y-%m-%dT%H:%M:%S'", ")", "+", "datetime", ".", "timedelta", "(", "minutes", "=", "30", ")", "end_time_str", "=", "end_time", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%S\"", ")", "+", "\"-{}\"", ".", "format", "(", "self", ".", "get_dst_gmt_timezone", "(", ")", ")", "time", "=", "{", "\"available\"", ":", "not", "entry", "[", "\"reserved\"", "]", ",", "\"start\"", ":", "entry", "[", "\"start_time\"", "]", ",", "\"end\"", ":", "end_time_str", ",", "}", "exists", "=", "False", "for", "room", "in", "categories", "[", "\"rooms\"", "]", ":", "if", "room", "[", "\"name\"", "]", "==", "entry", "[", "\"name\"", "]", ":", "room", "[", "\"times\"", "]", ".", "append", "(", "time", ")", "exists", "=", "True", "if", "not", "exists", ":", "del", "entry", "[", "\"booked_by_user\"", "]", "del", "entry", "[", "\"building\"", "]", "if", "\"reservation_id\"", "in", "entry", ":", "del", "entry", "[", "\"reservation_id\"", "]", "entry", "[", "\"lid\"", "]", "=", "1", "entry", "[", "\"gid\"", "]", "=", "1", "entry", "[", "\"capacity\"", "]", "=", "5", "entry", "[", "\"room_id\"", "]", "=", "int", "(", "entry", "[", "\"id\"", "]", ")", "del", "entry", "[", "\"id\"", "]", "entry", "[", "\"times\"", "]", "=", "[", "time", "]", "del", "entry", "[", "\"reserved\"", "]", "del", "entry", "[", "\"end_time\"", "]", "del", "entry", "[", "\"start_time\"", "]", "categories", "[", "\"rooms\"", "]", ".", "append", "(", "entry", ")", "return", "{", "\"categories\"", ":", "[", "categories", "]", ",", "\"rooms\"", ":", "categories", "[", "\"rooms\"", "]", "}" ]
avg_line_len: 42.627907, score: 14
def _key_from_json(self, data):
    """ Internal method, for creating the Key object. """
    key = Key()
    key.algorithm = data["Algorithm"]
    key.cipher_mode = data["CipherMode"]
    key.expiration = datetime.strptime(data["Expiration"].split(".")[0],
                                       "%Y-%m-%dT%H:%M:%S")
    key.key_id = data["ID"]
    key.key = data["Key"]
    key.size = data["KeySize"]
    key.url = data["KeyUrl"]
    return key
[ "def", "_key_from_json", "(", "self", ",", "data", ")", ":", "key", "=", "Key", "(", ")", "key", ".", "algorithm", "=", "data", "[", "\"Algorithm\"", "]", "key", ".", "cipher_mode", "=", "data", "[", "\"CipherMode\"", "]", "key", ".", "expiration", "=", "datetime", ".", "strptime", "(", "data", "[", "\"Expiration\"", "]", ".", "split", "(", "\".\"", ")", "[", "0", "]", ",", "\"%Y-%m-%dT%H:%M:%S\"", ")", "key", ".", "key_id", "=", "data", "[", "\"ID\"", "]", "key", ".", "key", "=", "data", "[", "\"Key\"", "]", "key", ".", "size", "=", "data", "[", "\"KeySize\"", "]", "key", ".", "url", "=", "data", "[", "\"KeyUrl\"", "]", "return", "key" ]
avg_line_len: 35.214286, score: 11.642857
def deauthorize_application(request):
    """
    When a user deauthorizes an application, Facebook sends a HTTP POST request to the
    application's "deauthorization callback" URL. This view picks up on requests of this
    sort and marks the corresponding users as unauthorized.
    """
    if request.facebook:
        user = User.objects.get(
            facebook_id=request.facebook.signed_request.user.id
        )
        user.authorized = False
        user.save()

        return HttpResponse()
    else:
        return HttpResponse(status=400)
[ "def", "deauthorize_application", "(", "request", ")", ":", "if", "request", ".", "facebook", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "facebook_id", "=", "request", ".", "facebook", ".", "signed_request", ".", "user", ".", "id", ")", "user", ".", "authorized", "=", "False", "user", ".", "save", "(", ")", "return", "HttpResponse", "(", ")", "else", ":", "return", "HttpResponse", "(", "status", "=", "400", ")" ]
avg_line_len: 31.823529, score: 22.176471
def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'):
    """A generic function to load mnist-like dataset.

    Parameters
    ----------
    shape : tuple
        The shape of digit images.
    path : str
        The path that the data is downloaded to.
    name : str
        The dataset name you want to use (the default is 'mnist').
    url : str
        The url of the dataset (the default is 'http://yann.lecun.com/exdb/mnist/').
    """
    path = os.path.join(path, name)

    # Define functions for loading mnist-like data's images and labels.
    # For convenience, they also download the requested files if needed.
    def load_mnist_images(path, filename):
        filepath = maybe_download_and_extract(filename, path, url)
        logging.info(filepath)
        # Read the inputs in Yann LeCun's binary format.
        with gzip.open(filepath, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=16)
        # The inputs are vectors now, we reshape them to monochrome 2D images,
        # following the shape convention: (examples, channels, rows, columns)
        data = data.reshape(shape)
        # The inputs come as bytes, we convert them to float32 in range [0,1].
        # (Actually to range [0, 255/256], for compatibility to the version
        # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
        return data / np.float32(256)

    def load_mnist_labels(path, filename):
        filepath = maybe_download_and_extract(filename, path, url)
        # Read the labels in Yann LeCun's binary format.
        with gzip.open(filepath, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=8)
        # The labels are vectors of integers now, that's exactly what we want.
        return data

    # Download and read the training and test set images and labels.
    logging.info("Load or Download {0} > {1}".format(name.upper(), path))
    X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz')
    y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz')
    X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz')
    y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz')

    # We reserve the last 10000 training examples for validation.
    X_train, X_val = X_train[:-10000], X_train[-10000:]
    y_train, y_val = y_train[:-10000], y_train[-10000:]

    # We just return all the arrays in order, as expected in main().
    # (It doesn't matter how we do this as long as we can read them again.)
    X_train = np.asarray(X_train, dtype=np.float32)
    y_train = np.asarray(y_train, dtype=np.int32)
    X_val = np.asarray(X_val, dtype=np.float32)
    y_val = np.asarray(y_val, dtype=np.int32)
    X_test = np.asarray(X_test, dtype=np.float32)
    y_test = np.asarray(y_test, dtype=np.int32)
    return X_train, y_train, X_val, y_val, X_test, y_test
[ "def", "_load_mnist_dataset", "(", "shape", ",", "path", ",", "name", "=", "'mnist'", ",", "url", "=", "'http://yann.lecun.com/exdb/mnist/'", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "name", ")", "# Define functions for loading mnist-like data's images and labels.", "# For convenience, they also download the requested files if needed.", "def", "load_mnist_images", "(", "path", ",", "filename", ")", ":", "filepath", "=", "maybe_download_and_extract", "(", "filename", ",", "path", ",", "url", ")", "logging", ".", "info", "(", "filepath", ")", "# Read the inputs in Yann LeCun's binary format.", "with", "gzip", ".", "open", "(", "filepath", ",", "'rb'", ")", "as", "f", ":", "data", "=", "np", ".", "frombuffer", "(", "f", ".", "read", "(", ")", ",", "np", ".", "uint8", ",", "offset", "=", "16", ")", "# The inputs are vectors now, we reshape them to monochrome 2D images,", "# following the shape convention: (examples, channels, rows, columns)", "data", "=", "data", ".", "reshape", "(", "shape", ")", "# The inputs come as bytes, we convert them to float32 in range [0,1].", "# (Actually to range [0, 255/256], for compatibility to the version", "# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)", "return", "data", "/", "np", ".", "float32", "(", "256", ")", "def", "load_mnist_labels", "(", "path", ",", "filename", ")", ":", "filepath", "=", "maybe_download_and_extract", "(", "filename", ",", "path", ",", "url", ")", "# Read the labels in Yann LeCun's binary format.", "with", "gzip", ".", "open", "(", "filepath", ",", "'rb'", ")", "as", "f", ":", "data", "=", "np", ".", "frombuffer", "(", "f", ".", "read", "(", ")", ",", "np", ".", "uint8", ",", "offset", "=", "8", ")", "# The labels are vectors of integers now, that's exactly what we want.", "return", "data", "# Download and read the training and test set images and labels.", "logging", ".", "info", "(", "\"Load or Download {0} > {1}\"", ".", "format", "(", "name", ".", "upper", "(", ")", ",", "path", ")", ")", "X_train", "=", "load_mnist_images", "(", "path", ",", "'train-images-idx3-ubyte.gz'", ")", "y_train", "=", "load_mnist_labels", "(", "path", ",", "'train-labels-idx1-ubyte.gz'", ")", "X_test", "=", "load_mnist_images", "(", "path", ",", "'t10k-images-idx3-ubyte.gz'", ")", "y_test", "=", "load_mnist_labels", "(", "path", ",", "'t10k-labels-idx1-ubyte.gz'", ")", "# We reserve the last 10000 training examples for validation.", "X_train", ",", "X_val", "=", "X_train", "[", ":", "-", "10000", "]", ",", "X_train", "[", "-", "10000", ":", "]", "y_train", ",", "y_val", "=", "y_train", "[", ":", "-", "10000", "]", ",", "y_train", "[", "-", "10000", ":", "]", "# We just return all the arrays in order, as expected in main().", "# (It doesn't matter how we do this as long as we can read them again.)", "X_train", "=", "np", ".", "asarray", "(", "X_train", ",", "dtype", "=", "np", ".", "float32", ")", "y_train", "=", "np", ".", "asarray", "(", "y_train", ",", "dtype", "=", "np", ".", "int32", ")", "X_val", "=", "np", ".", "asarray", "(", "X_val", ",", "dtype", "=", "np", ".", "float32", ")", "y_val", "=", "np", ".", "asarray", "(", "y_val", ",", "dtype", "=", "np", ".", "int32", ")", "X_test", "=", "np", ".", "asarray", "(", "X_test", ",", "dtype", "=", "np", ".", "float32", ")", "y_test", "=", "np", ".", "asarray", "(", "y_test", ",", "dtype", "=", "np", ".", "int32", ")", "return", "X_train", ",", "y_train", ",", "X_val", ",", "y_val", ",", "X_test", ",", "y_test" ]
avg_line_len: 46.377049, score: 22.57377
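A hedged usage sketch, following the TensorLayer convention where shape=(-1, 784) yields flat vectors (the loader and its helpers are assumed importable):

# Download (if needed) and load MNIST into ./data/mnist as flat 784-d vectors.
X_train, y_train, X_val, y_val, X_test, y_test = _load_mnist_dataset(
    shape=(-1, 784), path='data')
print(X_train.shape, y_train.shape)  # (50000, 784) (50000,)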
def _invalidates_cache(f):
    """
    Decorator for rruleset methods which may invalidate the
    cached length.
    """
    def inner_func(self, *args, **kwargs):
        rv = f(self, *args, **kwargs)
        self._invalidate_cache()
        return rv

    return inner_func
[ "def", "_invalidates_cache", "(", "f", ")", ":", "def", "inner_func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "rv", "=", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "_invalidate_cache", "(", ")", "return", "rv", "return", "inner_func" ]
avg_line_len: 22.166667, score: 15.833333
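A minimal sketch of how such a decorator is applied; the class below is hypothetical, only the pattern matters: any mutating method clears the cached length.

class RuleSet:
    def __init__(self):
        self._rules = []
        self._len = None            # cached length

    def _invalidate_cache(self):
        self._len = None

    @_invalidates_cache
    def rrule(self, rule):
        self._rules.append(rule)    # mutation drops the stale cache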
def create_service(self, customer_id, name, publish_key=None, comment=None):
    """Create a service."""
    body = self._formdata({
        "customer_id": customer_id,
        "name": name,
        "publish_key": publish_key,
        "comment": comment,
    }, FastlyService.FIELDS)
    content = self._fetch("/service", method="POST", body=body)
    return FastlyService(self, content)
[ "def", "create_service", "(", "self", ",", "customer_id", ",", "name", ",", "publish_key", "=", "None", ",", "comment", "=", "None", ")", ":", "body", "=", "self", ".", "_formdata", "(", "{", "\"customer_id\"", ":", "customer_id", ",", "\"name\"", ":", "name", ",", "\"publish_key\"", ":", "publish_key", ",", "\"comment\"", ":", "comment", ",", "}", ",", "FastlyService", ".", "FIELDS", ")", "content", "=", "self", ".", "_fetch", "(", "\"/service\"", ",", "method", "=", "\"POST\"", ",", "body", "=", "body", ")", "return", "FastlyService", "(", "self", ",", "content", ")" ]
avg_line_len: 34.8, score: 15.1
def weld_compare(array, scalar, operation, weld_type):
    """Applies a comparison operation between each element in the array and a scalar.

    Parameters
    ----------
    array : numpy.ndarray or WeldObject
        Input data.
    scalar : {int, float, str, bool, bytes_}
        Value to compare with; must be same type as the values in the array. If not a str,
        it is casted to weld_type (allowing one to write e.g. native Python int).
    operation : str
        Operation to do out of: {<, <=, ==, !=, >=, >}.
    weld_type : WeldType
        Type of the elements in the input array.

    Returns
    -------
    WeldObject
        Representation of this computation.
    """
    obj_id, weld_obj = create_weld_object(array)
    if not isinstance(scalar, str):
        scalar = to_weld_literal(scalar, weld_type)
    cast = '{type}({scalar})'.format(type=weld_type, scalar=scalar)
    # actually checking WeldVec(WeldChar)
    if isinstance(weld_type, WeldVec):
        cast = get_weld_obj_id(weld_obj, scalar)

    # TODO: there should be no casting! requires Weld fix
    weld_template = """map(
    {array},
    |a: {type}|
        a {operation} {cast}
)"""

    weld_obj.weld_code = weld_template.format(array=obj_id,
                                              operation=operation,
                                              type=weld_type,
                                              cast=cast)

    return weld_obj
[ "def", "weld_compare", "(", "array", ",", "scalar", ",", "operation", ",", "weld_type", ")", ":", "obj_id", ",", "weld_obj", "=", "create_weld_object", "(", "array", ")", "if", "not", "isinstance", "(", "scalar", ",", "str", ")", ":", "scalar", "=", "to_weld_literal", "(", "scalar", ",", "weld_type", ")", "cast", "=", "'{type}({scalar})'", ".", "format", "(", "type", "=", "weld_type", ",", "scalar", "=", "scalar", ")", "# actually checking WeldVec(WeldChar)", "if", "isinstance", "(", "weld_type", ",", "WeldVec", ")", ":", "cast", "=", "get_weld_obj_id", "(", "weld_obj", ",", "scalar", ")", "# TODO: there should be no casting! requires Weld fix", "weld_template", "=", "\"\"\"map(\n {array},\n |a: {type}| \n a {operation} {cast}\n)\"\"\"", "weld_obj", ".", "weld_code", "=", "weld_template", ".", "format", "(", "array", "=", "obj_id", ",", "operation", "=", "operation", ",", "type", "=", "weld_type", ",", "cast", "=", "cast", ")", "return", "weld_obj" ]
avg_line_len: 31.863636, score: 20.363636
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label to the middle of a power-law annotation (see
    ``goosempl.plot_powerlaw``).

    :arguments:

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **rx, ry** (``float``)
            Shift in x- and y-direction w.r.t. the default coordinates.

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified.
            Relative coordinates correspond to a fraction of the relevant axis.
            If you use relative coordinates, be sure to set the limits and scale
            before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''

    # get options/defaults
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    axis = kwargs.pop('axis', plt.gca())

    # check
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # apply width/height
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0:
            endy = starty + height
        elif exp == 0:
            endy = starty
        else:
            endy = starty - height
        endx = None

    # transform
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine multiplication constant
    const = starty / (startx ** exp)

    # get end x/y-coordinate
    if endx is not None:
        endy = const * endx ** exp
    else:
        endx = (endy / const) ** (1 / exp)

    # middle
    x = 10. ** (np.log10(startx) + rx * (np.log10(endx) - np.log10(startx)))
    y = 10. ** (np.log10(starty) + ry * (np.log10(endy) - np.log10(starty)))

    # plot
    return axis.text(x, y, text, **kwargs)
[ "def", "annotate_powerlaw", "(", "text", ",", "exp", ",", "startx", ",", "starty", ",", "width", "=", "None", ",", "rx", "=", "0.5", ",", "ry", "=", "0.5", ",", "*", "*", "kwargs", ")", ":", "# get options/defaults", "endx", "=", "kwargs", ".", "pop", "(", "'endx'", ",", "None", ")", "endy", "=", "kwargs", ".", "pop", "(", "'endy'", ",", "None", ")", "height", "=", "kwargs", ".", "pop", "(", "'height'", ",", "None", ")", "units", "=", "kwargs", ".", "pop", "(", "'units'", ",", "'relative'", ")", "axis", "=", "kwargs", ".", "pop", "(", "'axis'", ",", "plt", ".", "gca", "(", ")", ")", "# check", "if", "axis", ".", "get_xscale", "(", ")", "!=", "'log'", "or", "axis", ".", "get_yscale", "(", ")", "!=", "'log'", ":", "raise", "IOError", "(", "'This function only works on a log-log scale, where the power-law is a straight line'", ")", "# apply width/height", "if", "width", "is", "not", "None", ":", "endx", "=", "startx", "+", "width", "endy", "=", "None", "elif", "height", "is", "not", "None", ":", "if", "exp", ">", "0", ":", "endy", "=", "starty", "+", "height", "elif", "exp", "==", "0", ":", "endy", "=", "starty", "else", ":", "endy", "=", "starty", "-", "height", "endx", "=", "None", "# transform", "if", "units", ".", "lower", "(", ")", "==", "'relative'", ":", "[", "startx", ",", "endx", "]", "=", "rel2abs_x", "(", "[", "startx", ",", "endx", "]", ",", "axis", ")", "[", "starty", ",", "endy", "]", "=", "rel2abs_y", "(", "[", "starty", ",", "endy", "]", ",", "axis", ")", "# determine multiplication constant", "const", "=", "starty", "/", "(", "startx", "**", "exp", ")", "# get end x/y-coordinate", "if", "endx", "is", "not", "None", ":", "endy", "=", "const", "*", "endx", "**", "exp", "else", ":", "endx", "=", "(", "endy", "/", "const", ")", "**", "(", "1", "/", "exp", ")", "# middle", "x", "=", "10.", "**", "(", "np", ".", "log10", "(", "startx", ")", "+", "rx", "*", "(", "np", ".", "log10", "(", "endx", ")", "-", "np", ".", "log10", "(", "startx", ")", ")", ")", "y", "=", "10.", "**", "(", "np", ".", "log10", "(", "starty", ")", "+", "ry", "*", "(", "np", ".", "log10", "(", "endy", ")", "-", "np", ".", "log10", "(", "starty", ")", ")", ")", "# plot", "return", "axis", ".", "text", "(", "x", ",", "y", ",", "text", ",", "*", "*", "kwargs", ")" ]
avg_line_len: 27.696203, score: 25.443038
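A hedged usage sketch; annotate_powerlaw comes from goosempl and relies on its rel2abs_x/rel2abs_y helpers, so assume it is called inside that package or imported from it.

import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
x = np.logspace(0, 3, 50)
ax.loglog(x, 2.0 * x ** 0.5)   # data following an x^(1/2) power law
ax.set_xlim(1, 1e3)
ax.set_ylim(1, 1e2)

# label placed halfway along a power-law guide spanning 40% of the x-axis
annotate_powerlaw(r'$\sim x^{1/2}$', exp=0.5, startx=0.3, starty=0.3, width=0.4)
plt.show()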
def find_or_create(self, constructor, props, *, comp=None):
    """Looks for a model that matches the given dictionary constraints.
    If it is not found, a new model of the given type is created and
    saved to the database, then returned.
    """
    model = self.find_model(constructor, comp or props)
    if model is None:
        model = constructor(**props)
        self.insert_model(model)
    return model
[ "def", "find_or_create", "(", "self", ",", "constructor", ",", "props", ",", "*", ",", "comp", "=", "None", ")", ":", "model", "=", "self", ".", "find_model", "(", "constructor", ",", "comp", "or", "props", ")", "if", "model", "is", "None", ":", "model", "=", "constructor", "(", "*", "*", "props", ")", "self", ".", "insert_model", "(", "model", ")", "return", "model" ]
avg_line_len: 44.333333, score: 14.777778
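A hedged usage sketch (the User model and store object are hypothetical): comp narrows the lookup to a unique key, while props supplies the full row used on creation.

# Look up by email only, but populate name as well if a row must be created.
user = store.find_or_create(
    User,
    {'email': 'ada@example.com', 'name': 'Ada'},
    comp={'email': 'ada@example.com'},
)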
def to_dict(self):
    """Returns attributes formatted as a dictionary."""
    d = {'id': self.id, 'classes': self.classes}
    d.update(self.kvs)
    return d
[ "def", "to_dict", "(", "self", ")", ":", "d", "=", "{", "'id'", ":", "self", ".", "id", ",", "'classes'", ":", "self", ".", "classes", "}", "d", ".", "update", "(", "self", ".", "kvs", ")", "return", "d" ]
avg_line_len: 34.2, score: 14.4
def handle_bad_update(operation, ret):
    '''report error for bad update'''
    print("Error " + operation)
    sys.exit('Return code: ' + str(ret.status_code) + ' Error: ' + ret.text)
[ "def", "handle_bad_update", "(", "operation", ",", "ret", ")", ":", "print", "(", "\"Error \"", "+", "operation", ")", "sys", ".", "exit", "(", "'Return code: '", "+", "str", "(", "ret", ".", "status_code", ")", "+", "' Error: '", "+", "ret", ".", "text", ")" ]
avg_line_len: 45.5, score: 12.5
def getOccurrenceOfCharAtIndex(self, sym, index):
    '''
    This function gets the FM-index value of a character at the specified position
    @param sym - the character to find the occurrence level
    @param index - the index we want to find the occurrence level at
    @return - the number of occurrences of char before the specified index
    '''
    # sampling method
    # get the bin we occupy
    binID = index >> self.bitPower

    # these two methods seem to have the same approximate run time
    if (binID << self.bitPower) == index:
        ret = self.partialFM[binID][sym]
    else:
        ret = self.partialFM[binID][sym] + np.bincount(
            self.bwt[binID << self.bitPower:index], minlength=6)[sym]

    return int(ret)
[ "def", "getOccurrenceOfCharAtIndex", "(", "self", ",", "sym", ",", "index", ")", ":", "#sampling method", "#get the bin we occupy", "binID", "=", "index", ">>", "self", ".", "bitPower", "#these two methods seem to have the same approximate run time", "if", "(", "binID", "<<", "self", ".", "bitPower", ")", "==", "index", ":", "ret", "=", "self", ".", "partialFM", "[", "binID", "]", "[", "sym", "]", "else", ":", "ret", "=", "self", ".", "partialFM", "[", "binID", "]", "[", "sym", "]", "+", "np", ".", "bincount", "(", "self", ".", "bwt", "[", "binID", "<<", "self", ".", "bitPower", ":", "index", "]", ",", "minlength", "=", "6", ")", "[", "sym", "]", "return", "int", "(", "ret", ")" ]
avg_line_len: 45.941176, score: 25
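A self-contained sketch of the checkpoint idea behind this method (toy data, not the class above): counts up to an index are a stored partial count at the nearest checkpoint plus a bincount over the short remainder.

import numpy as np

bwt = np.array([1, 0, 2, 1, 1, 0, 3, 2], dtype=np.uint8)
bit_power = 2                       # checkpoint every 2**2 = 4 symbols
partial = np.zeros((len(bwt) // 4 + 1, 6), dtype=np.int64)
for b in range(1, partial.shape[0]):
    partial[b] = partial[b - 1] + np.bincount(bwt[(b - 1) * 4:b * 4], minlength=6)

def occ(sym, index):
    bin_id = index >> bit_power
    return partial[bin_id][sym] + np.bincount(bwt[bin_id << bit_power:index], minlength=6)[sym]

assert occ(1, 5) == np.count_nonzero(bwt[:5] == 1)  # both count symbol 1 in bwt[:5]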
def _search(self, model, condition=None, search_field='name', value_field='id',
            label_field=None, pagination=True):
    """
    Default search function

    :param search_field: Used for search field, default is 'name'
    :param value_field: Used for id field, default is id
    :param label_field: Used for label field, default is None, then it'll use
        the unicode() function
    """
    from uliweb import json, request

    name = request.GET.get('term', '')
    M = functions.get_model(model)

    def _v(label_field):
        if label_field:
            return lambda x: getattr(x, label_field)
        else:
            return lambda x: unicode(x)

    v_field = request.values.get('label', 'title')
    page = int(request.values.get('page') or 1)
    limit = int(request.values.get('limit') or 10)
    v_func = _v(label_field)

    if name:
        if condition is None:
            condition = M.c[search_field].like('%' + name + '%')
        if pagination:
            query = M.filter(condition)
            total = query.count()
            rows = [{'id': getattr(obj, value_field), v_field: v_func(obj)}
                    for obj in query.limit(limit).offset((page - 1) * limit)]
            result = {'total': total, 'rows': rows}
        else:
            result = [{'id': getattr(obj, value_field), v_field: v_func(obj)}
                      for obj in M.filter(condition)]
    else:
        result = []
    return json(result)
[ "def", "_search", "(", "self", ",", "model", ",", "condition", "=", "None", ",", "search_field", "=", "'name'", ",", "value_field", "=", "'id'", ",", "label_field", "=", "None", ",", "pagination", "=", "True", ")", ":", "from", "uliweb", "import", "json", ",", "request", "name", "=", "request", ".", "GET", ".", "get", "(", "'term'", ",", "''", ")", "M", "=", "functions", ".", "get_model", "(", "model", ")", "def", "_v", "(", "label_field", ")", ":", "if", "label_field", ":", "return", "lambda", "x", ":", "getattr", "(", "x", ",", "label_field", ")", "else", ":", "return", "lambda", "x", ":", "unicode", "(", "x", ")", "v_field", "=", "request", ".", "values", ".", "get", "(", "'label'", ",", "'title'", ")", "page", "=", "int", "(", "request", ".", "values", ".", "get", "(", "'page'", ")", "or", "1", ")", "limit", "=", "int", "(", "request", ".", "values", ".", "get", "(", "'limit'", ")", "or", "10", ")", "v_func", "=", "_v", "(", "label_field", ")", "if", "name", ":", "if", "condition", "is", "None", ":", "condition", "=", "M", ".", "c", "[", "search_field", "]", ".", "like", "(", "'%'", "+", "name", "+", "'%'", ")", "if", "pagination", ":", "query", "=", "M", ".", "filter", "(", "condition", ")", "total", "=", "query", ".", "count", "(", ")", "rows", "=", "[", "{", "'id'", ":", "getattr", "(", "obj", ",", "value_field", ")", ",", "v_field", ":", "v_func", "(", "obj", ")", "}", "for", "obj", "in", "query", ".", "limit", "(", "limit", ")", ".", "offset", "(", "(", "page", "-", "1", ")", "*", "limit", ")", "]", "result", "=", "{", "'total'", ":", "total", ",", "'rows'", ":", "rows", "}", "else", ":", "result", "=", "[", "{", "'id'", ":", "getattr", "(", "obj", ",", "value_field", ")", ",", "v_field", ":", "v_func", "(", "obj", ")", "}", "for", "obj", "in", "M", ".", "filter", "(", "condition", ")", "]", "else", ":", "result", "=", "[", "]", "return", "json", "(", "result", ")" ]
avg_line_len: 40.631579, score: 18.736842
def get_value(self, column=None, keyValue=None, table=None, verbose=None):
    """
    Returns the value from a cell as specified by row and column ids.

    :param column (string, optional): Specifies the name of a column in the table
    :param keyValue (string, optional): Specifies a row of a table using the
        primary key as the identifier
    :param table (string, optional): Specifies a table by table name. If the
        prefix SUID: is used, the table corresponding to the SUID will be returned.
    :returns: value from a cell as specified by row and column ids
    """
    PARAMS = set_param(['column', 'keyValue', 'table'], [column, keyValue, table])
    response = api(url=self.__url + "/get value", PARAMS=PARAMS, method="POST", verbose=verbose)
    return response
[ "def", "get_value", "(", "self", ",", "column", "=", "None", ",", "keyValue", "=", "None", ",", "table", "=", "None", ",", "verbose", "=", "None", ")", ":", "PARAMS", "=", "set_param", "(", "[", "'column'", ",", "'keyValue'", ",", "'table'", "]", ",", "[", "column", ",", "keyValue", ",", "table", "]", ")", "response", "=", "api", "(", "url", "=", "self", ".", "__url", "+", "\"/get value\"", ",", "PARAMS", "=", "PARAMS", ",", "method", "=", "\"POST\"", ",", "verbose", "=", "verbose", ")", "return", "response" ]
avg_line_len: 48.823529, score: 29.764706
def get_definition(self, name):
    """Get a definition by name."""
    for definition in self.definitions:
        if definition.name == name:
            return definition
[ "def", "get_definition", "(", "self", ",", "name", ")", ":", "for", "definition", "in", "self", ".", "definitions", ":", "if", "definition", ".", "name", "==", "name", ":", "return", "definition" ]
avg_line_len: 36.8, score: 4
def inbox_count_for(user):
    """
    returns the number of unread messages for the given user but does not
    mark them seen
    """
    return Message.objects.filter(recipient=user, read_at__isnull=True,
                                  recipient_deleted_at__isnull=True).count()
[ "def", "inbox_count_for", "(", "user", ")", ":", "return", "Message", ".", "objects", ".", "filter", "(", "recipient", "=", "user", ",", "read_at__isnull", "=", "True", ",", "recipient_deleted_at__isnull", "=", "True", ")", ".", "count", "(", ")" ]
avg_line_len: 40.833333, score: 23.833333
def _label_generalized(self, node):
    """Helper method that labels the nodes of GST with indexes of strings
    found in their descendants.
    """
    if node.is_leaf():
        x = {self._get_word_start_index(node.idx)}
    else:
        x = {n for ns in node.transition_links for n in ns[0].generalized_idxs}

    node.generalized_idxs = x
[ "def", "_label_generalized", "(", "self", ",", "node", ")", ":", "if", "node", ".", "is_leaf", "(", ")", ":", "x", "=", "{", "self", ".", "_get_word_start_index", "(", "node", ".", "idx", ")", "}", "else", ":", "x", "=", "{", "n", "for", "ns", "in", "node", ".", "transition_links", "for", "n", "in", "ns", "[", "0", "]", ".", "generalized_idxs", "}", "node", ".", "generalized_idxs", "=", "x" ]
avg_line_len: 40.777778, score: 12.777778
def isolate_region(sequences, start, end, gap_char='-'):
    """
    Replace regions before and after start:end with gap chars
    """
    # Check arguments
    if end <= start:
        raise ValueError("start of slice must precede end ({0} !> {1})".format(
            end, start))

    for sequence in sequences:
        seq = sequence.seq
        start_gap = gap_char * start
        end_gap = gap_char * (len(seq) - end)
        seq = Seq(start_gap + str(seq[start:end]) + end_gap,
                  alphabet=seq.alphabet)
        sequence.seq = seq
        yield sequence
[ "def", "isolate_region", "(", "sequences", ",", "start", ",", "end", ",", "gap_char", "=", "'-'", ")", ":", "# Check arguments", "if", "end", "<=", "start", ":", "raise", "ValueError", "(", "\"start of slice must precede end ({0} !> {1})\"", ".", "format", "(", "end", ",", "start", ")", ")", "for", "sequence", "in", "sequences", ":", "seq", "=", "sequence", ".", "seq", "start_gap", "=", "gap_char", "*", "start", "end_gap", "=", "gap_char", "*", "(", "len", "(", "seq", ")", "-", "end", ")", "seq", "=", "Seq", "(", "start_gap", "+", "str", "(", "seq", "[", "start", ":", "end", "]", ")", "+", "end_gap", ",", "alphabet", "=", "seq", ".", "alphabet", ")", "sequence", ".", "seq", "=", "seq", "yield", "sequence" ]
avg_line_len: 32.823529, score: 15.176471
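A hedged usage sketch, assuming Biopython SeqRecord inputs and an older Biopython in which Seq objects still carry an alphabet attribute (the function reads seq.alphabet):

from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

records = [SeqRecord(Seq("ACGTACGT"), id="r1")]
masked = list(isolate_region(records, start=2, end=6))
print(masked[0].seq)  # --GTAC--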
def max_await_time_ms(self, max_await_time_ms):
    """Specifies a time limit for a getMore operation on a
    :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other
    types of cursor max_await_time_ms is ignored.

    Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or
    ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this
    :class:`Cursor` has already been used.

    .. note:: `max_await_time_ms` requires server version **>= 3.2**

    :Parameters:
      - `max_await_time_ms`: the time limit after which the operation is
        aborted

    .. versionadded:: 3.2
    """
    if (not isinstance(max_await_time_ms, integer_types)
            and max_await_time_ms is not None):
        raise TypeError("max_await_time_ms must be an integer or None")
    self.__check_okay_to_chain()

    # Ignore max_await_time_ms if not tailable or await_data is False.
    if self.__query_flags & CursorType.TAILABLE_AWAIT:
        self.__max_await_time_ms = max_await_time_ms

    return self
[ "def", "max_await_time_ms", "(", "self", ",", "max_await_time_ms", ")", ":", "if", "(", "not", "isinstance", "(", "max_await_time_ms", ",", "integer_types", ")", "and", "max_await_time_ms", "is", "not", "None", ")", ":", "raise", "TypeError", "(", "\"max_await_time_ms must be an integer or None\"", ")", "self", ".", "__check_okay_to_chain", "(", ")", "# Ignore max_await_time_ms if not tailable or await_data is False.", "if", "self", ".", "__query_flags", "&", "CursorType", ".", "TAILABLE_AWAIT", ":", "self", ".", "__max_await_time_ms", "=", "max_await_time_ms", "return", "self" ]
avg_line_len: 40.37037, score: 24.111111
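This is pymongo's Cursor method; a typical tailing loop over a capped collection looks roughly like this (the collection name is made up):

from pymongo import CursorType, MongoClient

client = MongoClient()
coll = client.test.my_capped_collection   # hypothetical capped collection

cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(5000)
while cursor.alive:
    for doc in cursor:
        print(doc)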
def status_to_string(cls, status):
    """
    Converts a message status to a string.

    :param status: Status to convert (pyqode.core.modes.CheckerMessages)
    :return: The status string.
    :rtype: str
    """
    strings = {CheckerMessages.INFO: "Info",
               CheckerMessages.WARNING: "Warning",
               CheckerMessages.ERROR: "Error"}
    return strings[status]
[ "def", "status_to_string", "(", "cls", ",", "status", ")", ":", "strings", "=", "{", "CheckerMessages", ".", "INFO", ":", "\"Info\"", ",", "CheckerMessages", ".", "WARNING", ":", "\"Warning\"", ",", "CheckerMessages", ".", "ERROR", ":", "\"Error\"", "}", "return", "strings", "[", "status", "]" ]
avg_line_len: 34.583333, score: 13.083333
def to_headers(self, span_context):
    """Convert a SpanContext object to B3 propagation headers.

    :type span_context:
        :class:`~opencensus.trace.span_context.SpanContext`
    :param span_context: SpanContext object.

    :rtype: dict
    :returns: B3 propagation headers.
    """
    if not span_context.span_id:
        span_id = INVALID_SPAN_ID
    else:
        span_id = span_context.span_id

    sampled = span_context.trace_options.enabled

    return {
        _TRACE_ID_KEY: span_context.trace_id,
        _SPAN_ID_KEY: span_id,
        _SAMPLED_KEY: '1' if sampled else '0',
    }
[ "def", "to_headers", "(", "self", ",", "span_context", ")", ":", "if", "not", "span_context", ".", "span_id", ":", "span_id", "=", "INVALID_SPAN_ID", "else", ":", "span_id", "=", "span_context", ".", "span_id", "sampled", "=", "span_context", ".", "trace_options", ".", "enabled", "return", "{", "_TRACE_ID_KEY", ":", "span_context", ".", "trace_id", ",", "_SPAN_ID_KEY", ":", "span_id", ",", "_SAMPLED_KEY", ":", "'1'", "if", "sampled", "else", "'0'", "}" ]
avg_line_len: 28.173913, score: 17.26087
def toggle_help(self, event, mode=None):
    """
    Show/hide help message on help button click.
    """
    # if mode == 'open', show no matter what.
    # if mode == 'close', close.  otherwise, change state
    btn = self.toggle_help_btn
    shown = self.help_msg_boxsizer.GetStaticBox().IsShown()
    # if mode is specified, do that mode
    if mode == 'open':
        self.help_msg_boxsizer.ShowItems(True)
        btn.SetLabel('Hide help')
    elif mode == 'close':
        self.help_msg_boxsizer.ShowItems(False)
        btn.SetLabel('Show help')
    # otherwise, simply toggle states
    else:
        if shown:
            self.help_msg_boxsizer.ShowItems(False)
            btn.SetLabel('Show help')
        else:
            self.help_msg_boxsizer.ShowItems(True)
            btn.SetLabel('Hide help')
    self.do_fit(None)
[ "def", "toggle_help", "(", "self", ",", "event", ",", "mode", "=", "None", ")", ":", "# if mode == 'open', show no matter what.", "# if mode == 'close', close. otherwise, change state", "btn", "=", "self", ".", "toggle_help_btn", "shown", "=", "self", ".", "help_msg_boxsizer", ".", "GetStaticBox", "(", ")", ".", "IsShown", "(", ")", "# if mode is specified, do that mode", "if", "mode", "==", "'open'", ":", "self", ".", "help_msg_boxsizer", ".", "ShowItems", "(", "True", ")", "btn", ".", "SetLabel", "(", "'Hide help'", ")", "elif", "mode", "==", "'close'", ":", "self", ".", "help_msg_boxsizer", ".", "ShowItems", "(", "False", ")", "btn", ".", "SetLabel", "(", "'Show help'", ")", "# otherwise, simply toggle states", "else", ":", "if", "shown", ":", "self", ".", "help_msg_boxsizer", ".", "ShowItems", "(", "False", ")", "btn", ".", "SetLabel", "(", "'Show help'", ")", "else", ":", "self", ".", "help_msg_boxsizer", ".", "ShowItems", "(", "True", ")", "btn", ".", "SetLabel", "(", "'Hide help'", ")", "self", ".", "do_fit", "(", "None", ")" ]
avg_line_len: 37.625, score: 10.125
def create(self, friendly_name=values.unset, apn_credential_sid=values.unset,
           gcm_credential_sid=values.unset, messaging_service_sid=values.unset,
           facebook_messenger_page_id=values.unset,
           default_apn_notification_protocol_version=values.unset,
           default_gcm_notification_protocol_version=values.unset,
           fcm_credential_sid=values.unset,
           default_fcm_notification_protocol_version=values.unset,
           log_enabled=values.unset, alexa_skill_id=values.unset,
           default_alexa_notification_protocol_version=values.unset):
    """
    Create a new ServiceInstance

    :param unicode friendly_name: A string to describe the resource
    :param unicode apn_credential_sid: The SID of the Credential to use for APN Bindings
    :param unicode gcm_credential_sid: The SID of the Credential to use for GCM Bindings
    :param unicode messaging_service_sid: The SID of the Messaging Service to use for SMS Bindings
    :param unicode facebook_messenger_page_id: Deprecated
    :param unicode default_apn_notification_protocol_version: The protocol version to use for sending APNS notifications
    :param unicode default_gcm_notification_protocol_version: The protocol version to use for sending GCM notifications
    :param unicode fcm_credential_sid: The SID of the Credential to use for FCM Bindings
    :param unicode default_fcm_notification_protocol_version: The protocol version to use for sending FCM notifications
    :param bool log_enabled: Whether to log notifications
    :param unicode alexa_skill_id: Deprecated
    :param unicode default_alexa_notification_protocol_version: Deprecated

    :returns: Newly created ServiceInstance
    :rtype: twilio.rest.notify.v1.service.ServiceInstance
    """
    data = values.of({
        'FriendlyName': friendly_name,
        'ApnCredentialSid': apn_credential_sid,
        'GcmCredentialSid': gcm_credential_sid,
        'MessagingServiceSid': messaging_service_sid,
        'FacebookMessengerPageId': facebook_messenger_page_id,
        'DefaultApnNotificationProtocolVersion': default_apn_notification_protocol_version,
        'DefaultGcmNotificationProtocolVersion': default_gcm_notification_protocol_version,
        'FcmCredentialSid': fcm_credential_sid,
        'DefaultFcmNotificationProtocolVersion': default_fcm_notification_protocol_version,
        'LogEnabled': log_enabled,
        'AlexaSkillId': alexa_skill_id,
        'DefaultAlexaNotificationProtocolVersion': default_alexa_notification_protocol_version,
    })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return ServiceInstance(self._version, payload, )
[ "def", "create", "(", "self", ",", "friendly_name", "=", "values", ".", "unset", ",", "apn_credential_sid", "=", "values", ".", "unset", ",", "gcm_credential_sid", "=", "values", ".", "unset", ",", "messaging_service_sid", "=", "values", ".", "unset", ",", "facebook_messenger_page_id", "=", "values", ".", "unset", ",", "default_apn_notification_protocol_version", "=", "values", ".", "unset", ",", "default_gcm_notification_protocol_version", "=", "values", ".", "unset", ",", "fcm_credential_sid", "=", "values", ".", "unset", ",", "default_fcm_notification_protocol_version", "=", "values", ".", "unset", ",", "log_enabled", "=", "values", ".", "unset", ",", "alexa_skill_id", "=", "values", ".", "unset", ",", "default_alexa_notification_protocol_version", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'FriendlyName'", ":", "friendly_name", ",", "'ApnCredentialSid'", ":", "apn_credential_sid", ",", "'GcmCredentialSid'", ":", "gcm_credential_sid", ",", "'MessagingServiceSid'", ":", "messaging_service_sid", ",", "'FacebookMessengerPageId'", ":", "facebook_messenger_page_id", ",", "'DefaultApnNotificationProtocolVersion'", ":", "default_apn_notification_protocol_version", ",", "'DefaultGcmNotificationProtocolVersion'", ":", "default_gcm_notification_protocol_version", ",", "'FcmCredentialSid'", ":", "fcm_credential_sid", ",", "'DefaultFcmNotificationProtocolVersion'", ":", "default_fcm_notification_protocol_version", ",", "'LogEnabled'", ":", "log_enabled", ",", "'AlexaSkillId'", ":", "alexa_skill_id", ",", "'DefaultAlexaNotificationProtocolVersion'", ":", "default_alexa_notification_protocol_version", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "create", "(", "'POST'", ",", "self", ".", "_uri", ",", "data", "=", "data", ",", ")", "return", "ServiceInstance", "(", "self", ".", "_version", ",", "payload", ",", ")" ]
avg_line_len: 56.68, score: 29.8
def build_sort():
    '''Build sort query parameter from kwargs'''
    sorts = request.args.getlist('sort')
    sorts = [sorts] if isinstance(sorts, basestring) else sorts
    sorts = [s.split(' ') for s in sorts]
    return [{SORTS[s]: d} for s, d in sorts if s in SORTS]
[ "def", "build_sort", "(", ")", ":", "sorts", "=", "request", ".", "args", ".", "getlist", "(", "'sort'", ")", "sorts", "=", "[", "sorts", "]", "if", "isinstance", "(", "sorts", ",", "basestring", ")", "else", "sorts", "sorts", "=", "[", "s", ".", "split", "(", "' '", ")", "for", "s", "in", "sorts", "]", "return", "[", "{", "SORTS", "[", "s", "]", ":", "d", "}", "for", "s", ",", "d", "in", "sorts", "if", "s", "in", "SORTS", "]" ]
avg_line_len: 44.333333, score: 12
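A hedged illustration of the parsing, with SORTS assumed to map public sort names to index field names:

SORTS = {'name': 'name.raw', 'created': 'created_at'}

# ?sort=name asc&sort=created desc  ->  request.args.getlist('sort')
sorts = ['name asc', 'created desc']
sorts = [s.split(' ') for s in sorts]
print([{SORTS[s]: d} for s, d in sorts if s in SORTS])
# [{'name.raw': 'asc'}, {'created_at': 'desc'}]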
def until_protocol(self, timeout=None):
    """Return future that resolves after receipt of katcp protocol info.

    If the returned future resolves, the server's protocol information is
    available in the ProtocolFlags instance self.protocol_flags.
    """
    t0 = self.ioloop.time()
    yield self.until_running(timeout=timeout)
    t1 = self.ioloop.time()
    if timeout:
        timedelta = timeout - (t1 - t0)
    else:
        timedelta = None
    assert get_thread_ident() == self.ioloop_thread_id
    yield self._received_protocol_info.until_set(timeout=timedelta)
[ "def", "until_protocol", "(", "self", ",", "timeout", "=", "None", ")", ":", "t0", "=", "self", ".", "ioloop", ".", "time", "(", ")", "yield", "self", ".", "until_running", "(", "timeout", "=", "timeout", ")", "t1", "=", "self", ".", "ioloop", ".", "time", "(", ")", "if", "timeout", ":", "timedelta", "=", "timeout", "-", "(", "t1", "-", "t0", ")", "else", ":", "timedelta", "=", "None", "assert", "get_thread_ident", "(", ")", "==", "self", ".", "ioloop_thread_id", "yield", "self", ".", "_received_protocol_info", ".", "until_set", "(", "timeout", "=", "timedelta", ")" ]
avg_line_len: 38.375, score: 17.8125
def changelog(ctx, check, version, old_version, initial, quiet, dry_run):
    """Perform the operations needed to update the changelog.

    This method is supposed to be used by other tasks and not directly.
    """
    if check not in get_valid_checks():
        abort('Check `{}` is not an Agent-based Integration'.format(check))

    # sanity check on the version provided
    cur_version = old_version or get_version_string(check)
    if parse_version_info(version) <= parse_version_info(cur_version):
        abort('Current version is {}, cannot bump to {}'.format(cur_version, version))

    if not quiet:
        echo_info('Current version of check {}: {}, bumping to: {}'.format(check, cur_version, version))

    # get the name of the current release tag
    target_tag = get_release_tag_string(check, cur_version)

    # get the diff from HEAD
    diff_lines = get_commits_since(check, None if initial else target_tag)

    # for each PR get the title, we'll use it to populate the changelog
    pr_numbers = parse_pr_numbers(diff_lines)
    if not quiet:
        echo_info('Found {} PRs merged since tag: {}'.format(len(pr_numbers), target_tag))

    if initial:
        # Only use the first one
        del pr_numbers[:-1]

    user_config = ctx.obj
    entries = []
    for pr_num in pr_numbers:
        try:
            payload = get_pr(pr_num, user_config)
        except Exception as e:
            echo_failure('Unable to fetch info for PR #{}: {}'.format(pr_num, e))
            continue

        changelog_labels = get_changelog_types(payload)

        if not changelog_labels:
            abort('No valid changelog labels found attached to PR #{}, please add one!'.format(pr_num))
        elif len(changelog_labels) > 1:
            abort('Multiple changelog labels found attached to PR #{}, please only use one!'.format(pr_num))

        changelog_type = changelog_labels[0]
        if changelog_type == CHANGELOG_TYPE_NONE:
            if not quiet:
                # No changelog entry for this PR
                echo_info('Skipping PR #{} from changelog due to label'.format(pr_num))
            continue

        author = payload.get('user', {}).get('login')
        author_url = payload.get('user', {}).get('html_url')
        title = '[{}] {}'.format(changelog_type, payload.get('title'))

        entry = ChangelogEntry(pr_num, title, payload.get('html_url'), author,
                               author_url, from_contributor(payload))

        entries.append(entry)

    # store the new changelog in memory
    new_entry = StringIO()

    # the header contains version and date
    header = '## {} / {}\n'.format(version, datetime.now().strftime('%Y-%m-%d'))
    new_entry.write(header)

    # one bullet point for each PR
    new_entry.write('\n')
    for entry in entries:
        thanks_note = ''
        if entry.from_contributor:
            thanks_note = ' Thanks [{}]({}).'.format(entry.author, entry.author_url)

        new_entry.write('* {}. See [#{}]({}).{}\n'.format(entry.title, entry.number, entry.url, thanks_note))
    new_entry.write('\n')

    # read the old contents
    changelog_path = os.path.join(get_root(), check, 'CHANGELOG.md')
    old = list(stream_file_lines(changelog_path))

    # write the new changelog in memory
    changelog_buffer = StringIO()

    # preserve the title
    changelog_buffer.write(''.join(old[:2]))

    # prepend the new changelog to the old contents
    # make the command idempotent
    if header not in old:
        changelog_buffer.write(new_entry.getvalue())

    # append the rest of the old changelog
    changelog_buffer.write(''.join(old[2:]))

    # print on the standard out in case of a dry run
    if dry_run:
        echo_info(changelog_buffer.getvalue())
    else:
        # overwrite the old changelog
        write_file(changelog_path, changelog_buffer.getvalue())
[ "def", "changelog", "(", "ctx", ",", "check", ",", "version", ",", "old_version", ",", "initial", ",", "quiet", ",", "dry_run", ")", ":", "if", "check", "not", "in", "get_valid_checks", "(", ")", ":", "abort", "(", "'Check `{}` is not an Agent-based Integration'", ".", "format", "(", "check", ")", ")", "# sanity check on the version provided", "cur_version", "=", "old_version", "or", "get_version_string", "(", "check", ")", "if", "parse_version_info", "(", "version", ")", "<=", "parse_version_info", "(", "cur_version", ")", ":", "abort", "(", "'Current version is {}, cannot bump to {}'", ".", "format", "(", "cur_version", ",", "version", ")", ")", "if", "not", "quiet", ":", "echo_info", "(", "'Current version of check {}: {}, bumping to: {}'", ".", "format", "(", "check", ",", "cur_version", ",", "version", ")", ")", "# get the name of the current release tag", "target_tag", "=", "get_release_tag_string", "(", "check", ",", "cur_version", ")", "# get the diff from HEAD", "diff_lines", "=", "get_commits_since", "(", "check", ",", "None", "if", "initial", "else", "target_tag", ")", "# for each PR get the title, we'll use it to populate the changelog", "pr_numbers", "=", "parse_pr_numbers", "(", "diff_lines", ")", "if", "not", "quiet", ":", "echo_info", "(", "'Found {} PRs merged since tag: {}'", ".", "format", "(", "len", "(", "pr_numbers", ")", ",", "target_tag", ")", ")", "if", "initial", ":", "# Only use the first one", "del", "pr_numbers", "[", ":", "-", "1", "]", "user_config", "=", "ctx", ".", "obj", "entries", "=", "[", "]", "for", "pr_num", "in", "pr_numbers", ":", "try", ":", "payload", "=", "get_pr", "(", "pr_num", ",", "user_config", ")", "except", "Exception", "as", "e", ":", "echo_failure", "(", "'Unable to fetch info for PR #{}: {}'", ".", "format", "(", "pr_num", ",", "e", ")", ")", "continue", "changelog_labels", "=", "get_changelog_types", "(", "payload", ")", "if", "not", "changelog_labels", ":", "abort", "(", "'No valid changelog labels found attached to PR #{}, please add one!'", ".", "format", "(", "pr_num", ")", ")", "elif", "len", "(", "changelog_labels", ")", ">", "1", ":", "abort", "(", "'Multiple changelog labels found attached to PR #{}, please only use one!'", ".", "format", "(", "pr_num", ")", ")", "changelog_type", "=", "changelog_labels", "[", "0", "]", "if", "changelog_type", "==", "CHANGELOG_TYPE_NONE", ":", "if", "not", "quiet", ":", "# No changelog entry for this PR", "echo_info", "(", "'Skipping PR #{} from changelog due to label'", ".", "format", "(", "pr_num", ")", ")", "continue", "author", "=", "payload", ".", "get", "(", "'user'", ",", "{", "}", ")", ".", "get", "(", "'login'", ")", "author_url", "=", "payload", ".", "get", "(", "'user'", ",", "{", "}", ")", ".", "get", "(", "'html_url'", ")", "title", "=", "'[{}] {}'", ".", "format", "(", "changelog_type", ",", "payload", ".", "get", "(", "'title'", ")", ")", "entry", "=", "ChangelogEntry", "(", "pr_num", ",", "title", ",", "payload", ".", "get", "(", "'html_url'", ")", ",", "author", ",", "author_url", ",", "from_contributor", "(", "payload", ")", ")", "entries", ".", "append", "(", "entry", ")", "# store the new changelog in memory", "new_entry", "=", "StringIO", "(", ")", "# the header contains version and date", "header", "=", "'## {} / {}\\n'", ".", "format", "(", "version", ",", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y-%m-%d'", ")", ")", "new_entry", ".", "write", "(", "header", ")", "# one bullet point for each PR", "new_entry", ".", "write", "(", "'\\n'", ")", "for", 
"entry", "in", "entries", ":", "thanks_note", "=", "''", "if", "entry", ".", "from_contributor", ":", "thanks_note", "=", "' Thanks [{}]({}).'", ".", "format", "(", "entry", ".", "author", ",", "entry", ".", "author_url", ")", "new_entry", ".", "write", "(", "'* {}. See [#{}]({}).{}\\n'", ".", "format", "(", "entry", ".", "title", ",", "entry", ".", "number", ",", "entry", ".", "url", ",", "thanks_note", ")", ")", "new_entry", ".", "write", "(", "'\\n'", ")", "# read the old contents", "changelog_path", "=", "os", ".", "path", ".", "join", "(", "get_root", "(", ")", ",", "check", ",", "'CHANGELOG.md'", ")", "old", "=", "list", "(", "stream_file_lines", "(", "changelog_path", ")", ")", "# write the new changelog in memory", "changelog_buffer", "=", "StringIO", "(", ")", "# preserve the title", "changelog_buffer", ".", "write", "(", "''", ".", "join", "(", "old", "[", ":", "2", "]", ")", ")", "# prepend the new changelog to the old contents", "# make the command idempotent", "if", "header", "not", "in", "old", ":", "changelog_buffer", ".", "write", "(", "new_entry", ".", "getvalue", "(", ")", ")", "# append the rest of the old changelog", "changelog_buffer", ".", "write", "(", "''", ".", "join", "(", "old", "[", "2", ":", "]", ")", ")", "# print on the standard out in case of a dry run", "if", "dry_run", ":", "echo_info", "(", "changelog_buffer", ".", "getvalue", "(", ")", ")", "else", ":", "# overwrite the old changelog", "write_file", "(", "changelog_path", ",", "changelog_buffer", ".", "getvalue", "(", ")", ")" ]
36.607843
24.313725
def vectorize_range(values):
    """
    This function is for URL encoding.
    Takes a value, a tuple, or a list of tuples and returns a single string:
    elements within a tuple are joined by '_', and tuples in a list are
    joined by ','.
    """
    if isinstance(values, tuple):
        return '_'.join(str(i) for i in values)
    if isinstance(values, list):
        if not all([isinstance(item, tuple) for item in values]):
            raise TypeError('Items in the list must be tuples')
        return ','.join('_'.join(str(i) for i in v) for v in values)
    return str(values)
[ "def", "vectorize_range", "(", "values", ")", ":", "if", "isinstance", "(", "values", ",", "tuple", ")", ":", "return", "'_'", ".", "join", "(", "str", "(", "i", ")", "for", "i", "in", "values", ")", "if", "isinstance", "(", "values", ",", "list", ")", ":", "if", "not", "all", "(", "[", "isinstance", "(", "item", ",", "tuple", ")", "for", "item", "in", "values", "]", ")", ":", "raise", "TypeError", "(", "'Items in the list must be tuples'", ")", "return", "','", ".", "join", "(", "'_'", ".", "join", "(", "str", "(", "i", ")", "for", "i", "in", "v", ")", "for", "v", "in", "values", ")", "return", "str", "(", "values", ")" ]
43.307692
15.615385
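Usage sketch for vectorize_range above, exercising all three accepted input shapes (expected outputs in the comments):

print(vectorize_range(5))                  # '5'
print(vectorize_range((1, 2)))             # '1_2'
print(vectorize_range([(1, 2), (3, 4)]))   # '1_2,3_4'
# vectorize_range([(1, 2), 3]) raises TypeError: items in the list must be tuples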
def backoff(
        max_tries=constants.BACKOFF_DEFAULT_MAXTRIES,
        delay=constants.BACKOFF_DEFAULT_DELAY,
        factor=constants.BACKOFF_DEFAULT_FACTOR,
        exceptions=None):
    """Implements an exponential backoff decorator which will retry decorated
    function upon given exceptions. This implementation is based on
    `Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from
    the *Python Decorator Library*.

    :param int max_tries: Number of tries before giving up.
        Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`.

    :param int delay: Delay between retries (in seconds).
        Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`.

    :param int factor: Multiplying factor by which the delay is increased for
        the next retry.
        Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`.

    :param exceptions: Tuple of exception types to catch that triggers retry.
        Any exception not listed will break the decorator and retry routines
        will not run.

    :type exceptions: tuple[Exception]

    """
    if max_tries <= 0:
        raise ValueError('Max tries must be greater than 0; got {!r}'.format(max_tries))

    if delay <= 0:
        raise ValueError('Delay must be greater than 0; got {!r}'.format(delay))

    if factor <= 1:
        raise ValueError('Backoff factor must be greater than 1; got {!r}'.format(factor))

    def outter(f):
        def inner(*args, **kwargs):
            m_max_tries, m_delay = max_tries, delay  # make mutable
            while m_max_tries > 0:
                try:
                    retval = f(*args, **kwargs)
                except exceptions:
                    logger.exception('backoff retry for: %r (max_tries=%r, delay=%r, '
                                     'factor=%r, exceptions=%r)',
                                     f, max_tries, delay, factor, exceptions)
                    m_max_tries -= 1  # consume an attempt
                    if m_max_tries <= 0:
                        raise  # run out of tries
                    time.sleep(m_delay)  # wait...
                    m_delay *= factor  # make future wait longer
                else:
                    # we're done without errors
                    return retval
        return inner
    return outter
[ "def", "backoff", "(", "max_tries", "=", "constants", ".", "BACKOFF_DEFAULT_MAXTRIES", ",", "delay", "=", "constants", ".", "BACKOFF_DEFAULT_DELAY", ",", "factor", "=", "constants", ".", "BACKOFF_DEFAULT_FACTOR", ",", "exceptions", "=", "None", ")", ":", "if", "max_tries", "<=", "0", ":", "raise", "ValueError", "(", "'Max tries must be greater than 0; got {!r}'", ".", "format", "(", "max_tries", ")", ")", "if", "delay", "<=", "0", ":", "raise", "ValueError", "(", "'Delay must be greater than 0; got {!r}'", ".", "format", "(", "delay", ")", ")", "if", "factor", "<=", "1", ":", "raise", "ValueError", "(", "'Backoff factor must be greater than 1; got {!r}'", ".", "format", "(", "factor", ")", ")", "def", "outter", "(", "f", ")", ":", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "m_max_tries", ",", "m_delay", "=", "max_tries", ",", "delay", "# make mutable", "while", "m_max_tries", ">", "0", ":", "try", ":", "retval", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "exceptions", ":", "logger", ".", "exception", "(", "'backoff retry for: %r (max_tries=%r, delay=%r, '", "'factor=%r, exceptions=%r)'", ",", "f", ",", "max_tries", ",", "delay", ",", "factor", ",", "exceptions", ")", "m_max_tries", "-=", "1", "# consume an attempt", "if", "m_max_tries", "<=", "0", ":", "raise", "# run out of tries", "time", ".", "sleep", "(", "m_delay", ")", "# wait...", "m_delay", "*=", "factor", "# make future wait longer", "else", ":", "# we're done without errors", "return", "retval", "return", "inner", "return", "outter" ]
41.185185
24
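A minimal usage sketch for the backoff decorator above, assuming the module-level constants, logger, and time imports of the source; note that exceptions should be given as a tuple of exception classes, since the default of None would make the except clause fail at runtime:

import random

@backoff(max_tries=3, delay=1, factor=2, exceptions=(ConnectionError,))
def fetch():
    # simulate a transient failure roughly half of the time
    if random.random() < 0.5:
        raise ConnectionError('transient outage')
    return 'ok'

print(fetch())  # retries up to 3 times, sleeping 1s, then 2s, between attempts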
def decode(string):
    """Decode Erlang external term."""
    if not string:
        raise IncompleteData(string)
    if string[0] != 131:
        raise ValueError("unknown protocol version: %r" % string[0])
    if string[1:2] == b'P':
        # compressed term
        if len(string) < 16:
            raise IncompleteData(string)
        d = decompressobj()
        term_string = d.decompress(string[6:]) + d.flush()
        uncompressed_size, = _int4_unpack(string[2:6])
        if len(term_string) != uncompressed_size:
            raise ValueError(
                "invalid compressed tag, "
                "%d bytes but got %d" % (uncompressed_size, len(term_string)))
        # tail data returned by decode_term() can be simply ignored
        term, _tail = decode_term(term_string)
        return term, d.unused_data
    return decode_term(string[1:])
[ "def", "decode", "(", "string", ")", ":", "if", "not", "string", ":", "raise", "IncompleteData", "(", "string", ")", "if", "string", "[", "0", "]", "!=", "131", ":", "raise", "ValueError", "(", "\"unknown protocol version: %r\"", "%", "string", "[", "0", "]", ")", "if", "string", "[", "1", ":", "2", "]", "==", "b'P'", ":", "# compressed term", "if", "len", "(", "string", ")", "<", "16", ":", "raise", "IncompleteData", "(", "string", ")", "d", "=", "decompressobj", "(", ")", "term_string", "=", "d", ".", "decompress", "(", "string", "[", "6", ":", "]", ")", "+", "d", ".", "flush", "(", ")", "uncompressed_size", ",", "=", "_int4_unpack", "(", "string", "[", "2", ":", "6", "]", ")", "if", "len", "(", "term_string", ")", "!=", "uncompressed_size", ":", "raise", "ValueError", "(", "\"invalid compressed tag, \"", "\"%d bytes but got %d\"", "%", "(", "uncompressed_size", ",", "len", "(", "term_string", ")", ")", ")", "# tail data returned by decode_term() can be simple ignored", "term", ",", "_tail", "=", "decode_term", "(", "term_string", ")", "return", "term", ",", "d", ".", "unused_data", "return", "decode_term", "(", "string", "[", "1", ":", "]", ")" ]
40.047619
13.380952
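A usage sketch for decode, assuming the companion decode_term handles the standard Erlang external term format tags (0x83 is the protocol version byte; tag 97 is SMALL_INTEGER_EXT):

term, tail = decode(b'\x83a\x07')
print(term, tail)  # 7 b''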
def _pack(self, fname, encoding):
    """Save workflow with ``--pack`` option

    This means that all tools and subworkflows are included in the workflow
    file that is created. A packed workflow cannot be loaded and used in
    scriptcwl.
    """
    (fd, tmpfile) = tempfile.mkstemp()
    os.close(fd)
    try:
        self.save(tmpfile, mode='abs', validate=False)
        document_loader, processobj, metadata, uri = load_cwl(tmpfile)
    finally:
        # cleanup tmpfile
        os.remove(tmpfile)
    with codecs.open(fname, 'wb', encoding=encoding) as f:
        f.write(print_pack(document_loader, processobj, uri, metadata))
[ "def", "_pack", "(", "self", ",", "fname", ",", "encoding", ")", ":", "(", "fd", ",", "tmpfile", ")", "=", "tempfile", ".", "mkstemp", "(", ")", "os", ".", "close", "(", "fd", ")", "try", ":", "self", ".", "save", "(", "tmpfile", ",", "mode", "=", "'abs'", ",", "validate", "=", "False", ")", "document_loader", ",", "processobj", ",", "metadata", ",", "uri", "=", "load_cwl", "(", "tmpfile", ")", "finally", ":", "# cleanup tmpfile", "os", ".", "remove", "(", "tmpfile", ")", "with", "codecs", ".", "open", "(", "fname", ",", "'wb'", ",", "encoding", "=", "encoding", ")", "as", "f", ":", "f", ".", "write", "(", "print_pack", "(", "document_loader", ",", "processobj", ",", "uri", ",", "metadata", ")", ")" ]
37.833333
21.5
def build_app_loggers(log_level, apps, handlers=None):
    """
    Return a logger dict for app packages with the given log level and no
    propagation since the apps list is parsed/normalized to be the set of
    top-level apps.

    The optional handlers argument is provided so that this pattern of app
    loggers can be used independently of the configure_logger method below,
    if desired.
    """
    # Use 'default' handler provided by DEFAULT_LOGGING config if
    # not supplied.
    if handlers is None:
        handlers = ['default']

    # The log config expects the handlers value to be a list, so let's
    # make sure of that here.
    if not isinstance(handlers, list):
        handlers = list(handlers)

    app_loggers = {}

    for app in apps:
        app_loggers[app] = {
            'level': log_level,
            'handlers': handlers,
            'propagate': False,
        }

    return app_loggers
[ "def", "build_app_loggers", "(", "log_level", ",", "apps", ",", "handlers", "=", "None", ")", ":", "# Use 'default' handler provided by DEFAULT_LOGGING config if", "# not supplied.", "if", "handlers", "is", "None", ":", "handlers", "=", "[", "'default'", "]", "# The log config expects the handlers value to be a list, so let's", "# make sure of that here.", "if", "not", "isinstance", "(", "handlers", ",", "list", ")", ":", "handlers", "=", "list", "(", "handlers", ")", "app_loggers", "=", "{", "}", "for", "app", "in", "apps", ":", "app_loggers", "[", "app", "]", "=", "{", "'level'", ":", "log_level", ",", "'handlers'", ":", "handlers", ",", "'propagate'", ":", "False", ",", "}", "return", "app_loggers" ]
33.148148
20.481481
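For example, wiring two top-level apps to the default handler:

loggers = build_app_loggers('INFO', ['myapp', 'api'])
# {'myapp': {'level': 'INFO', 'handlers': ['default'], 'propagate': False},
#  'api': {'level': 'INFO', 'handlers': ['default'], 'propagate': False}}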
def merge(args):
    """
    %prog merge bedfiles > newbedfile

    Concatenate bed files together, renaming seqids to avoid conflicts in the
    new bed file.
    """
    p = OptionParser(merge.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    bedfiles = args
    fw = must_open(opts.outfile, "w")
    for bedfile in bedfiles:
        bed = Bed(bedfile)
        pf = op.basename(bedfile).split(".")[0]
        for b in bed:
            b.seqid = "_".join((pf, b.seqid))
            print(b, file=fw)
[ "def", "merge", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "merge", ".", "__doc__", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "bedfiles", "=", "args", "fw", "=", "must_open", "(", "opts", ".", "outfile", ",", "\"w\"", ")", "for", "bedfile", "in", "bedfiles", ":", "bed", "=", "Bed", "(", "bedfile", ")", "pf", "=", "op", ".", "basename", "(", "bedfile", ")", ".", "split", "(", "\".\"", ")", "[", "0", "]", "for", "b", "in", "bed", ":", "b", ".", "seqid", "=", "\"_\"", ".", "join", "(", "(", "pf", ",", "b", ".", "seqid", ")", ")", "print", "(", "b", ",", "file", "=", "fw", ")" ]
26.227273
15.318182
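A hypothetical invocation sketch (this is an OptionParser-style CLI task, so it takes an argv-style list directly; the file names are placeholders and output goes to stdout unless --outfile is set):

merge(['sample1.bed', 'sample2.bed'])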
def fields_to_dict(fields, type_=OrderedDict): """Convert a flat list of key/values into an OrderedDict""" fields_iterator = iter(fields) return type_(zip(fields_iterator, fields_iterator))
[ "def", "fields_to_dict", "(", "fields", ",", "type_", "=", "OrderedDict", ")", ":", "fields_iterator", "=", "iter", "(", "fields", ")", "return", "type_", "(", "zip", "(", "fields_iterator", ",", "fields_iterator", ")", ")" ]
49.5
6.75
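For example:

print(fields_to_dict(['a', 1, 'b', 2]))
# OrderedDict([('a', 1), ('b', 2)])
print(fields_to_dict(('x', 10, 'y', 20), type_=dict))
# {'x': 10, 'y': 20}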
def get_invoices_per_page(self, per_page=1000, page=1, params=None): """ Get invoices per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list """ return self._get_resource_per_page(resource=INVOICES, per_page=per_page, page=page, params=params)
[ "def", "get_invoices_per_page", "(", "self", ",", "per_page", "=", "1000", ",", "page", "=", "1", ",", "params", "=", "None", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "INVOICES", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "params", ")" ]
40.7
20.5
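A hypothetical usage sketch (client stands in for an instance of the API wrapper defining this method, and the search parameter name is assumed):

invoices = client.get_invoices_per_page(per_page=100, page=2,
                                        params={'status': 'open'})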
def generator_to_list(function):
    """
    Wrap a generator function so that it returns a list when called.

    For example:

    # Define a generator
    >>> def mygen(n):
    ...     i = 0
    ...     while i < n:
    ...         yield i
    ...         i += 1

    # This is how it might work
    >>> generator = mygen(5)
    >>> next(generator)
    0
    >>> next(generator)
    1

    # Wrap it in generator_to_list, and it will behave differently.
    >>> mygen = generator_to_list(mygen)
    >>> mygen(5)
    [0, 1, 2, 3, 4]
    """
    def wrapper(*args, **kwargs):
        return list(function(*args, **kwargs))
    wrapper.__name__ = function.__name__
    wrapper.__doc__ = function.__doc__
    return wrapper
[ "def", "generator_to_list", "(", "function", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "list", "(", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "wrapper", ".", "__name__", "=", "function", ".", "__name__", "wrapper", ".", "__doc__", "=", "function", ".", "__doc__", "return", "wrapper" ]
26.724138
15.758621
def get_authorization_admin_session_for_vault(self, vault_id): """Gets the ``OsidSession`` associated with the authorization admin service for the given vault. arg: vault_id (osid.id.Id): the ``Id`` of the vault return: (osid.authorization.AuthorizationAdminSession) - ``an _authorization_admin_session`` raise: NotFound - ``vault_id`` not found raise: NullArgument - ``vault_id`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_authorization_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_authorization_admin()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_authorization_admin(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.AuthorizationAdminSession(vault_id, runtime=self._runtime)
[ "def", "get_authorization_admin_session_for_vault", "(", "self", ",", "vault_id", ")", ":", "if", "not", "self", ".", "supports_authorization_admin", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "##", "# Also include check to see if the catalog Id is found otherwise raise errors.NotFound", "##", "# pylint: disable=no-member", "return", "sessions", ".", "AuthorizationAdminSession", "(", "vault_id", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
49.73913
20.826087
def parse_cfgstr_list(cfgstr_list, smartcast=True, oldmode=True):
    r"""
    Parses a list of items in the format
    ['var1:val1', 'var2:val2', 'var3:val3']
    the '=' character can be used instead of the ':' character if desired

    TODO: see ut.parse_cfgstr3

    Args:
        cfgstr_list (list):

    Returns:
        dict: cfgdict

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_arg import *  # NOQA
        >>> import utool as ut
        >>> cfgstr_list = ['var1=val1', 'var2=1', 'var3=1.0', 'var4=None', 'var5=[1,2,3]', 'var6=(a,b,c)']
        >>> smartcast = True
        >>> cfgdict = parse_cfgstr_list(cfgstr_list, smartcast, oldmode=False)
        >>> result = ut.repr2(cfgdict, sorted_=True, newlines=False)
        >>> print(result)
        {'var1': 'val1', 'var2': 1, 'var3': 1.0, 'var4': None, 'var5': [1, 2, 3], 'var6': ('a', 'b', 'c')}
    """
    cfgdict = {}
    for item in cfgstr_list:
        if item == '':
            continue
        if oldmode:
            keyval_tup = item.replace('=', ':').split(':')
            assert len(keyval_tup) == 2, '[!] Invalid cfgitem=%r' % (item,)
            key, val = keyval_tup
        else:
            keyval_tup = item.split('=')
            if len(keyval_tup) == 1:
                # single specifications are interpreted as booleans
                key = keyval_tup[0]
                val = True
            else:
                assert len(keyval_tup) >= 2, '[!] Invalid cfgitem=%r' % (item,)
                key, val = keyval_tup[0], '='.join(keyval_tup[1:])
        if smartcast:
            val = util_type.smart_cast2(val)
        cfgdict[key] = val
    return cfgdict
[ "def", "parse_cfgstr_list", "(", "cfgstr_list", ",", "smartcast", "=", "True", ",", "oldmode", "=", "True", ")", ":", "cfgdict", "=", "{", "}", "for", "item", "in", "cfgstr_list", ":", "if", "item", "==", "''", ":", "continue", "if", "oldmode", ":", "keyval_tup", "=", "item", ".", "replace", "(", "'='", ",", "':'", ")", ".", "split", "(", "':'", ")", "assert", "len", "(", "keyval_tup", ")", "==", "2", ",", "'[!] Invalid cfgitem=%r'", "%", "(", "item", ",", ")", "key", ",", "val", "=", "keyval_tup", "else", ":", "keyval_tup", "=", "item", ".", "split", "(", "'='", ")", "if", "len", "(", "keyval_tup", ")", "==", "1", ":", "# single specifications are interpeted as booleans", "key", "=", "keyval_tup", "[", "0", "]", "val", "=", "True", "else", ":", "assert", "len", "(", "keyval_tup", ")", ">=", "2", ",", "'[!] Invalid cfgitem=%r'", "%", "(", "item", ",", ")", "key", ",", "val", "=", "keyval_tup", "[", "0", "]", ",", "'='", ".", "join", "(", "keyval_tup", "[", "1", ":", "]", ")", "if", "smartcast", ":", "val", "=", "util_type", ".", "smart_cast2", "(", "val", ")", "cfgdict", "[", "key", "]", "=", "val", "return", "cfgdict" ]
34.92
22.14
def debug(verbose):
    """Run the experiment locally."""
    (id, tmp) = setup_experiment(debug=True, verbose=verbose)

    # Drop all the tables from the database.
    db.init_db(drop_all=True)

    # Switch to the temporary directory.
    cwd = os.getcwd()
    os.chdir(tmp)

    # Load psiTurk configuration.
    config = PsiturkConfig()
    config.load_config()

    # Set the mode to debug.
    config.set("Experiment Configuration", "mode", "debug")
    config.set("Shell Parameters", "launch_in_sandbox_mode", "true")
    config.set(
        "Server Parameters",
        "logfile",
        os.path.join(cwd, config.get("Server Parameters", "logfile")))

    # Swap in the HotAirRecruiter
    os.rename("wallace_experiment.py", "wallace_experiment_tmp.py")
    with open("wallace_experiment_tmp.py", "r+") as f:
        with open("wallace_experiment.py", "w+") as f2:
            f2.write("from wallace.recruiters import HotAirRecruiter\n")
            for idx, line in enumerate(f):
                if re.search("\s*self.recruiter = (.*)", line):
                    p = line.partition("self.recruiter =")
                    f2.write(p[0] + p[1] + ' HotAirRecruiter\n')
                else:
                    f2.write(line)
    os.remove("wallace_experiment_tmp.py")

    # Set environment variables.
    aws_vars = ['aws_access_key_id', 'aws_secret_access_key', 'aws_region']
    for var in aws_vars:
        if var not in os.environ:
            os.environ[var] = config.get('AWS Access', var)

    pt_vars = ['psiturk_access_key_id', 'psiturk_secret_access_id']
    for var in pt_vars:
        if var not in os.environ:
            os.environ[var] = config.get('psiTurk Access', var)

    if "HOST" not in os.environ:
        os.environ["HOST"] = config.get('Server Parameters', 'host')

    # Start up the local server
    log("Starting up the server...")

    # Try opening the psiTurk shell.
    try:
        p = pexpect.spawn("psiturk")
        p.expect_exact("]$")
        p.sendline("server on")
        p.expect_exact("Experiment server launching...")

        # Launch the experiment.
        time.sleep(4)
        host = config.get("Server Parameters", "host")
        port = config.get("Server Parameters", "port")
        subprocess.call(
            'curl --data "" http://{}:{}/launch'.format(host, port),
            shell=True)

        log("Here's the psiTurk shell...")
        p.interact()

    except Exception:
        click.echo("\nCouldn't open psiTurk shell. Internet connection okay?")

    log("Completed debugging of experiment " + id + ".")

    os.chdir(cwd)
[ "def", "debug", "(", "verbose", ")", ":", "(", "id", ",", "tmp", ")", "=", "setup_experiment", "(", "debug", "=", "True", ",", "verbose", "=", "verbose", ")", "# Drop all the tables from the database.", "db", ".", "init_db", "(", "drop_all", "=", "True", ")", "# Switch to the temporary directory.", "cwd", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "tmp", ")", "# Load psiTurk configuration.", "config", "=", "PsiturkConfig", "(", ")", "config", ".", "load_config", "(", ")", "# Set the mode to debug.", "config", ".", "set", "(", "\"Experiment Configuration\"", ",", "\"mode\"", ",", "\"debug\"", ")", "config", ".", "set", "(", "\"Shell Parameters\"", ",", "\"launch_in_sandbox_mode\"", ",", "\"true\"", ")", "config", ".", "set", "(", "\"Server Parameters\"", ",", "\"logfile\"", ",", "os", ".", "path", ".", "join", "(", "cwd", ",", "config", ".", "get", "(", "\"Server Parameters\"", ",", "\"logfile\"", ")", ")", ")", "# Swap in the HotAirRecruiter", "os", ".", "rename", "(", "\"wallace_experiment.py\"", ",", "\"wallace_experiment_tmp.py\"", ")", "with", "open", "(", "\"wallace_experiment_tmp.py\"", ",", "\"r+\"", ")", "as", "f", ":", "with", "open", "(", "\"wallace_experiment.py\"", ",", "\"w+\"", ")", "as", "f2", ":", "f2", ".", "write", "(", "\"from wallace.recruiters import HotAirRecruiter\\n\"", ")", "for", "idx", ",", "line", "in", "enumerate", "(", "f", ")", ":", "if", "re", ".", "search", "(", "\"\\s*self.recruiter = (.*)\"", ",", "line", ")", ":", "p", "=", "line", ".", "partition", "(", "\"self.recruiter =\"", ")", "f2", ".", "write", "(", "p", "[", "0", "]", "+", "p", "[", "1", "]", "+", "' HotAirRecruiter\\n'", ")", "else", ":", "f2", ".", "write", "(", "line", ")", "os", ".", "remove", "(", "\"wallace_experiment_tmp.py\"", ")", "# Set environment variables.", "aws_vars", "=", "[", "'aws_access_key_id'", ",", "'aws_secret_access_key'", ",", "'aws_region'", "]", "for", "var", "in", "aws_vars", ":", "if", "var", "not", "in", "os", ".", "environ", ":", "os", ".", "environ", "[", "var", "]", "=", "config", ".", "get", "(", "'AWS Access'", ",", "var", ")", "pt_vars", "=", "[", "'psiturk_access_key_id'", ",", "'psiturk_secret_access_id'", "]", "for", "var", "in", "pt_vars", ":", "if", "var", "not", "in", "os", ".", "environ", ":", "os", ".", "environ", "[", "var", "]", "=", "config", ".", "get", "(", "'psiTurk Access'", ",", "var", ")", "if", "\"HOST\"", "not", "in", "os", ".", "environ", ":", "os", ".", "environ", "[", "\"HOST\"", "]", "=", "config", ".", "get", "(", "'Server Parameters'", ",", "'host'", ")", "# Start up the local server", "log", "(", "\"Starting up the server...\"", ")", "# Try opening the psiTurk shell.", "try", ":", "p", "=", "pexpect", ".", "spawn", "(", "\"psiturk\"", ")", "p", ".", "expect_exact", "(", "\"]$\"", ")", "p", ".", "sendline", "(", "\"server on\"", ")", "p", ".", "expect_exact", "(", "\"Experiment server launching...\"", ")", "# Launche the experiment.", "time", ".", "sleep", "(", "4", ")", "host", "=", "config", ".", "get", "(", "\"Server Parameters\"", ",", "\"host\"", ")", "port", "=", "config", ".", "get", "(", "\"Server Parameters\"", ",", "\"port\"", ")", "subprocess", ".", "call", "(", "'curl --data \"\" http://{}:{}/launch'", ".", "format", "(", "host", ",", "port", ")", ",", "shell", "=", "True", ")", "log", "(", "\"Here's the psiTurk shell...\"", ")", "p", ".", "interact", "(", ")", "except", "Exception", ":", "click", ".", "echo", "(", "\"\\nCouldn't open psiTurk shell. 
Internet connection okay?\"", ")", "log", "(", "\"Completed debugging of experiment \"", "+", "id", "+", "\".\"", ")", "os", ".", "chdir", "(", "cwd", ")" ]
31.974684
21.126582
def respects(g, imp):
    """
    g is an int, where each bit marks the presence of an attribute
    the implication U -> V is ternary coded: 1 = ∈V, 2 = ∈U, 0 otherwise
    g and the implication code have the same number of digits
    """
    if isinstance(g, str):
        g = int(g, 2)
    if isinstance(imp, int):
        imp = istr(imp, 3, g.bit_length())
    V = int(imp.replace('1', '2').replace('2', '1'), 2)
    U = int(imp.replace('1', '0').replace('2', '1'), 2)
    ginU = U & g == U
    ginV = V & g == V
    return not ginU or ginV
[ "def", "respects", "(", "g", ",", "imp", ")", ":", "if", "isinstance", "(", "g", ",", "str", ")", ":", "g", "=", "int", "(", "g", ",", "2", ")", "if", "isinstance", "(", "imp", ",", "int", ")", ":", "imp", "=", "istr", "(", "imp", ",", "3", ",", "g", ".", "bit_length", "(", ")", ")", "V", "=", "int", "(", "imp", ".", "replace", "(", "'1'", ",", "'2'", ")", ".", "replace", "(", "'2'", ",", "'1'", ")", ",", "2", ")", "U", "=", "int", "(", "imp", ".", "replace", "(", "'1'", ",", "'0'", ")", ".", "replace", "(", "'2'", ",", "'1'", ")", ",", "2", ")", "ginU", "=", "U", "&", "g", "==", "U", "ginV", "=", "V", "&", "g", "==", "V", "return", "not", "ginU", "or", "ginV" ]
31.2
12
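A small worked example with three attributes; passing the implication as a ready-made ternary string sidesteps the istr helper. '210' encodes the implication {attr0} -> {attr1}:

print(respects('110', '210'))  # True: premise attr0 present, conclusion attr1 present
print(respects('100', '210'))  # False: premise holds but attr1 is missing
print(respects('010', '210'))  # True: premise attr0 absent, so nothing is required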
def get_subfields(self, datafield, subfield, i1=None, i2=None,
                  exception=False):
    """
    Return content of given `subfield` in `datafield`.

    Args:
        datafield (str): Section name (for example "001", "100", "700").
        subfield (str):  Subfield name (for example "a", "1", etc..).
        i1 (str, default None): Optional i1/ind1 parameter value, which
           will be used for search.
        i2 (str, default None): Optional i2/ind2 parameter value, which
           will be used for search.
        exception (bool): If ``True``, :exc:`~exceptions.KeyError` is
            raised when the method couldn't find the given `datafield` /
            `subfield`. If ``False``, a blank array ``[]`` is returned.

    Returns:
        list: of :class:`.MARCSubrecord`.

    Raises:
        KeyError: If the subfield or datafield couldn't be found.

    Note:
        MARCSubrecord is practically the same thing as a string, but has
        defined :meth:`.MARCSubrecord.i1` and :attr:`.MARCSubrecord.i2`
        methods.

        You may need to be able to get this, because MARC XML depends on
        i/ind parameters from time to time (names of authors for example).
    """
    if len(datafield) != 3:
        raise ValueError(
            "`datafield` parameter have to be exactly 3 chars long!"
        )
    if len(subfield) != 1:
        raise ValueError(
            "Bad subfield specification - subfield have to be 1 char long!"
        )

    # if datafield not found, return or raise exception
    if datafield not in self.datafields:
        if exception:
            raise KeyError(datafield + " is not in datafields!")
        return []

    # look for subfield defined by `subfield`, `i1` and `i2` parameters
    output = []
    for datafield in self.datafields[datafield]:
        if subfield not in datafield:
            continue

        # records are not returned just like plain string, but like
        # MARCSubrecord, because you will need ind1/ind2 values
        for sfield in datafield[subfield]:
            if i1 and sfield.i1 != i1:
                continue
            if i2 and sfield.i2 != i2:
                continue
            output.append(sfield)

    if not output and exception:
        raise KeyError(subfield + " couldn't be found in subfields!")

    return output
[ "def", "get_subfields", "(", "self", ",", "datafield", ",", "subfield", ",", "i1", "=", "None", ",", "i2", "=", "None", ",", "exception", "=", "False", ")", ":", "if", "len", "(", "datafield", ")", "!=", "3", ":", "raise", "ValueError", "(", "\"`datafield` parameter have to be exactly 3 chars long!\"", ")", "if", "len", "(", "subfield", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Bad subfield specification - subfield have to be 1 char long!\"", ")", "# if datafield not found, return or raise exception", "if", "datafield", "not", "in", "self", ".", "datafields", ":", "if", "exception", ":", "raise", "KeyError", "(", "datafield", "+", "\" is not in datafields!\"", ")", "return", "[", "]", "# look for subfield defined by `subfield`, `i1` and `i2` parameters", "output", "=", "[", "]", "for", "datafield", "in", "self", ".", "datafields", "[", "datafield", "]", ":", "if", "subfield", "not", "in", "datafield", ":", "continue", "# records are not returned just like plain string, but like", "# MARCSubrecord, because you will need ind1/ind2 values", "for", "sfield", "in", "datafield", "[", "subfield", "]", ":", "if", "i1", "and", "sfield", ".", "i1", "!=", "i1", ":", "continue", "if", "i2", "and", "sfield", ".", "i2", "!=", "i2", ":", "continue", "output", ".", "append", "(", "sfield", ")", "if", "not", "output", "and", "exception", ":", "raise", "KeyError", "(", "subfield", "+", "\" couldn't be found in subfields!\"", ")", "return", "output" ]
36.617647
24
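A hypothetical usage sketch (rec stands in for a parsed MARC record instance; field 100, subfield 'a' conventionally holds the main author name):

authors = rec.get_subfields('100', 'a')
print([str(a) for a in authors])
# restrict to a particular first indicator, failing loudly if absent:
authors = rec.get_subfields('100', 'a', i1='1', exception=True)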
def render(self, request, template, context):
    """
    Returns a response. By default, this will contain the rendered PDF, but
    if ``allow_force_html`` is ``True`` and the querystring ``html=true``
    is set, it will return plain HTML instead.
    """
    if self.allow_force_html and self.request.GET.get('html', False):
        html = get_template(template).render(context)
        return HttpResponse(html)
    else:
        response = HttpResponse(content_type='application/pdf')
        if self.prompt_download:
            response['Content-Disposition'] = 'attachment; filename="{}"' \
                .format(self.get_download_name())
        helpers.render_pdf(
            template=template,
            file_=response,
            url_fetcher=self.url_fetcher,
            context=context,
        )
        return response
[ "def", "render", "(", "self", ",", "request", ",", "template", ",", "context", ")", ":", "if", "self", ".", "allow_force_html", "and", "self", ".", "request", ".", "GET", ".", "get", "(", "'html'", ",", "False", ")", ":", "html", "=", "get_template", "(", "template", ")", ".", "render", "(", "context", ")", "return", "HttpResponse", "(", "html", ")", "else", ":", "response", "=", "HttpResponse", "(", "content_type", "=", "'application/pdf'", ")", "if", "self", ".", "prompt_download", ":", "response", "[", "'Content-Disposition'", "]", "=", "'attachment; filename=\"{}\"'", ".", "format", "(", "self", ".", "get_download_name", "(", ")", ")", "helpers", ".", "render_pdf", "(", "template", "=", "template", ",", "file_", "=", "response", ",", "url_fetcher", "=", "self", ".", "url_fetcher", ",", "context", "=", "context", ",", ")", "return", "response" ]
42.857143
15.714286
def tools(self, extra_params=None):
    """
    All Tools in this Space
    """
    return self.api._get_json(
        SpaceTool,
        space=self,
        rel_path=self._build_rel_path('space_tools'),
        extra_params=extra_params,
    )
[ "def", "tools", "(", "self", ",", "extra_params", "=", "None", ")", ":", "return", "self", ".", "api", ".", "_get_json", "(", "SpaceTool", ",", "space", "=", "self", ",", "rel_path", "=", "self", ".", "_build_rel_path", "(", "'space_tools'", ")", ",", "extra_params", "=", "extra_params", ",", ")" ]
27.2
10.5
def _is_socket(cls, stream):
    """Check if the given stream is a socket."""
    try:
        fd = stream.fileno()
    except ValueError:
        # If it has no file descriptor, it's not a socket
        return False

    sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
    try:
        # This will raise a socket.error if it's not a socket
        sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)
    except socket.error as ex:
        if ex.args[0] != errno.ENOTSOCK:
            # It must be a socket
            return True
        # ENOTSOCK: the descriptor is a regular file, not a socket
        return False
    else:
        # If an exception wasn't raised, it's a socket
        return True
[ "def", "_is_socket", "(", "cls", ",", "stream", ")", ":", "try", ":", "fd", "=", "stream", ".", "fileno", "(", ")", "except", "ValueError", ":", "# If it has no file descriptor, it's not a socket", "return", "False", "sock", "=", "socket", ".", "fromfd", "(", "fd", ",", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_RAW", ")", "try", ":", "# This will raise a socket.error if it's not a socket", "sock", ".", "getsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_TYPE", ")", "except", "socket", ".", "error", "as", "ex", ":", "if", "ex", ".", "args", "[", "0", "]", "!=", "errno", ".", "ENOTSOCK", ":", "# It must be a socket", "return", "True", "# ENOTSOCK: the descriptor is a regular file, not a socket", "return", "False", "else", ":", "# If an exception wasn't raised, it's a socket", "return", "True" ]
35.526316
17.210526
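Since _is_socket is a classmethod, it is called on the defining class; Owner below is a stand-in name for that class:

import socket
import sys

print(Owner._is_socket(sys.stdin))        # False for a terminal or regular file
sock = socket.socket()
print(Owner._is_socket(sock.makefile()))  # True: the stream wraps a socket fd
sock.close()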
def filter_kepler_lcdict(lcdict, filterflags=True, nanfilter='sap,pdc', timestoignore=None): '''This filters the Kepler `lcdict`, removing nans and bad observations. By default, this function removes points in the Kepler LC that have ANY quality flags set. Parameters ---------- lcdict : lcdict An `lcdict` produced by `consolidate_kepler_fitslc` or `read_kepler_fitslc`. filterflags : bool If True, will remove any measurements that have non-zero quality flags present. This usually indicates an issue with the instrument or spacecraft. nanfilter : {'sap','pdc','sap,pdc'} Indicates the flux measurement type(s) to apply the filtering to. timestoignore : list of tuples or None This is of the form:: [(time1_start, time1_end), (time2_start, time2_end), ...] and indicates the start and end times to mask out of the final lcdict. Use this to remove anything that wasn't caught by the quality flags. Returns ------- lcdict Returns an `lcdict` (this is useable by most astrobase functions for LC processing). The `lcdict` is filtered IN PLACE! ''' cols = lcdict['columns'] # filter all bad LC points as noted by quality flags if filterflags: nbefore = lcdict['time'].size filterind = lcdict['sap_quality'] == 0 for col in cols: if '.' in col: key, subkey = col.split('.') lcdict[key][subkey] = lcdict[key][subkey][filterind] else: lcdict[col] = lcdict[col][filterind] nafter = lcdict['time'].size LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s' % (nbefore, nafter)) if nanfilter and nanfilter == 'sap,pdc': notnanind = ( npisfinite(lcdict['sap']['sap_flux']) & npisfinite(lcdict['pdc']['pdcsap_flux']) & npisfinite(lcdict['time']) ) elif nanfilter and nanfilter == 'sap': notnanind = ( npisfinite(lcdict['sap']['sap_flux']) & npisfinite(lcdict['time']) ) elif nanfilter and nanfilter == 'pdc': notnanind = ( npisfinite(lcdict['pdc']['pdcsap_flux']) & npisfinite(lcdict['time']) ) # remove nans from all columns if nanfilter: nbefore = lcdict['time'].size for col in cols: if '.' in col: key, subkey = col.split('.') lcdict[key][subkey] = lcdict[key][subkey][notnanind] else: lcdict[col] = lcdict[col][notnanind] nafter = lcdict['time'].size LOGINFO('removed nans, ndet before = %s, ndet after = %s' % (nbefore, nafter)) # exclude all times in timestoignore if (timestoignore and isinstance(timestoignore, list) and len(timestoignore) > 0): exclind = npfull_like(lcdict['time'], True, dtype=np.bool_) nbefore = exclind.size # get all the masks for ignoretime in timestoignore: time0, time1 = ignoretime[0], ignoretime[1] thismask = ~((lcdict['time'] >= time0) & (lcdict['time'] <= time1)) exclind = exclind & thismask # apply the masks for col in cols: if '.' in col: key, subkey = col.split('.') lcdict[key][subkey] = lcdict[key][subkey][exclind] else: lcdict[col] = lcdict[col][exclind] nafter = lcdict['time'].size LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s' % (nbefore, nafter)) return lcdict
[ "def", "filter_kepler_lcdict", "(", "lcdict", ",", "filterflags", "=", "True", ",", "nanfilter", "=", "'sap,pdc'", ",", "timestoignore", "=", "None", ")", ":", "cols", "=", "lcdict", "[", "'columns'", "]", "# filter all bad LC points as noted by quality flags", "if", "filterflags", ":", "nbefore", "=", "lcdict", "[", "'time'", "]", ".", "size", "filterind", "=", "lcdict", "[", "'sap_quality'", "]", "==", "0", "for", "col", "in", "cols", ":", "if", "'.'", "in", "col", ":", "key", ",", "subkey", "=", "col", ".", "split", "(", "'.'", ")", "lcdict", "[", "key", "]", "[", "subkey", "]", "=", "lcdict", "[", "key", "]", "[", "subkey", "]", "[", "filterind", "]", "else", ":", "lcdict", "[", "col", "]", "=", "lcdict", "[", "col", "]", "[", "filterind", "]", "nafter", "=", "lcdict", "[", "'time'", "]", ".", "size", "LOGINFO", "(", "'applied quality flag filter, ndet before = %s, ndet after = %s'", "%", "(", "nbefore", ",", "nafter", ")", ")", "if", "nanfilter", "and", "nanfilter", "==", "'sap,pdc'", ":", "notnanind", "=", "(", "npisfinite", "(", "lcdict", "[", "'sap'", "]", "[", "'sap_flux'", "]", ")", "&", "npisfinite", "(", "lcdict", "[", "'pdc'", "]", "[", "'pdcsap_flux'", "]", ")", "&", "npisfinite", "(", "lcdict", "[", "'time'", "]", ")", ")", "elif", "nanfilter", "and", "nanfilter", "==", "'sap'", ":", "notnanind", "=", "(", "npisfinite", "(", "lcdict", "[", "'sap'", "]", "[", "'sap_flux'", "]", ")", "&", "npisfinite", "(", "lcdict", "[", "'time'", "]", ")", ")", "elif", "nanfilter", "and", "nanfilter", "==", "'pdc'", ":", "notnanind", "=", "(", "npisfinite", "(", "lcdict", "[", "'pdc'", "]", "[", "'pdcsap_flux'", "]", ")", "&", "npisfinite", "(", "lcdict", "[", "'time'", "]", ")", ")", "# remove nans from all columns", "if", "nanfilter", ":", "nbefore", "=", "lcdict", "[", "'time'", "]", ".", "size", "for", "col", "in", "cols", ":", "if", "'.'", "in", "col", ":", "key", ",", "subkey", "=", "col", ".", "split", "(", "'.'", ")", "lcdict", "[", "key", "]", "[", "subkey", "]", "=", "lcdict", "[", "key", "]", "[", "subkey", "]", "[", "notnanind", "]", "else", ":", "lcdict", "[", "col", "]", "=", "lcdict", "[", "col", "]", "[", "notnanind", "]", "nafter", "=", "lcdict", "[", "'time'", "]", ".", "size", "LOGINFO", "(", "'removed nans, ndet before = %s, ndet after = %s'", "%", "(", "nbefore", ",", "nafter", ")", ")", "# exclude all times in timestoignore", "if", "(", "timestoignore", "and", "isinstance", "(", "timestoignore", ",", "list", ")", "and", "len", "(", "timestoignore", ")", ">", "0", ")", ":", "exclind", "=", "npfull_like", "(", "lcdict", "[", "'time'", "]", ",", "True", ",", "dtype", "=", "np", ".", "bool_", ")", "nbefore", "=", "exclind", ".", "size", "# get all the masks", "for", "ignoretime", "in", "timestoignore", ":", "time0", ",", "time1", "=", "ignoretime", "[", "0", "]", ",", "ignoretime", "[", "1", "]", "thismask", "=", "~", "(", "(", "lcdict", "[", "'time'", "]", ">=", "time0", ")", "&", "(", "lcdict", "[", "'time'", "]", "<=", "time1", ")", ")", "exclind", "=", "exclind", "&", "thismask", "# apply the masks", "for", "col", "in", "cols", ":", "if", "'.'", "in", "col", ":", "key", ",", "subkey", "=", "col", ".", "split", "(", "'.'", ")", "lcdict", "[", "key", "]", "[", "subkey", "]", "=", "lcdict", "[", "key", "]", "[", "subkey", "]", "[", "exclind", "]", "else", ":", "lcdict", "[", "col", "]", "=", "lcdict", "[", "col", "]", "[", "exclind", "]", "nafter", "=", "lcdict", "[", "'time'", "]", ".", "size", "LOGINFO", "(", "'removed timestoignore, ndet before = %s, ndet after = 
%s'", "%", "(", "nbefore", ",", "nafter", ")", ")", "return", "lcdict" ]
29.712
22.064
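A hypothetical usage sketch (the lcdict comes from the companion Kepler FITS readers named in the docstring; the ignore-window values are placeholders in the light curve's time units):

lcdict = filter_kepler_lcdict(lcdict,
                              filterflags=True,
                              nanfilter='pdc',
                              timestoignore=[(54964.5, 54965.5)])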
def get_payments(self):
    """Get all your payments.

    Returns:
        list of dicts: payments

        For each payment in the list, a dict contains the following items:

            * nmrAmount (`decimal.Decimal`)
            * usdAmount (`decimal.Decimal`)
            * tournament (`str`)
            * round (`dict`)
              * number (`int`)
              * openTime (`datetime`)
              * resolveTime (`datetime`)
              * resolvedGeneral (`bool`)
              * resolvedStaking (`bool`)

    Example:
        >>> api = NumerAPI(secret_key="..", public_id="..")
        >>> api.get_payments()
        [{'nmrAmount': Decimal('0.00'),
          'round': {'number': 84,
           'openTime': datetime.datetime(2017, 12, 2, 18, 0, tzinfo=tzutc()),
           'resolveTime': datetime.datetime(2018, 1, 1, 18, 0, tzinfo=tzutc()),
           'resolvedGeneral': True,
           'resolvedStaking': True},
          'tournament': 'staking',
          'usdAmount': Decimal('17.44')},
         ...
        ]
    """
    query = """
      query {
        user {
          payments {
            nmrAmount
            round {
              number
              openTime
              resolveTime
              resolvedGeneral
              resolvedStaking
            }
            tournament
            usdAmount
          }
        }
      }
    """
    data = self.raw_query(query, authorization=True)['data']
    payments = data['user']['payments']
    # convert strings to python objects
    for p in payments:
        utils.replace(p['round'], "openTime", utils.parse_datetime_string)
        utils.replace(p['round'], "resolveTime", utils.parse_datetime_string)
        utils.replace(p, "usdAmount", utils.parse_float_string)
        utils.replace(p, "nmrAmount", utils.parse_float_string)
    return payments
[ "def", "get_payments", "(", "self", ")", ":", "query", "=", "\"\"\"\n query {\n user {\n payments {\n nmrAmount\n round {\n number\n openTime\n resolveTime\n resolvedGeneral\n resolvedStaking\n }\n tournament\n usdAmount\n }\n }\n }\n \"\"\"", "data", "=", "self", ".", "raw_query", "(", "query", ",", "authorization", "=", "True", ")", "[", "'data'", "]", "payments", "=", "data", "[", "'user'", "]", "[", "'payments'", "]", "# convert strings to python objects", "for", "p", "in", "payments", ":", "utils", ".", "replace", "(", "p", "[", "'round'", "]", ",", "\"openTime\"", ",", "utils", ".", "parse_datetime_string", ")", "utils", ".", "replace", "(", "p", "[", "'round'", "]", ",", "\"resolveTime\"", ",", "utils", ".", "parse_datetime_string", ")", "utils", ".", "replace", "(", "p", ",", "\"usdAmount\"", ",", "utils", ".", "parse_float_string", ")", "utils", ".", "replace", "(", "p", ",", "\"nmrAmount\"", ",", "utils", ".", "parse_float_string", ")", "return", "payments" ]
33.583333
15.75
def page(title=None, pageid=None, auto_suggest=True, redirect=True, preload=False): ''' Get a WikipediaPage object for the page with title `title` or the pageid `pageid` (mutually exclusive). Keyword arguments: * title - the title of the page to load * pageid - the numeric pageid of the page to load * auto_suggest - let Wikipedia find a valid page title for the query * redirect - allow redirection without raising RedirectError * preload - load content, summary, images, references, and links during initialization ''' if title is not None: if auto_suggest: results, suggestion = search(title, results=1, suggestion=True) try: title = suggestion or results[0] except IndexError: # if there is no suggestion or search results, the page doesn't exist raise PageError(title) return WikipediaPage(title, redirect=redirect, preload=preload) elif pageid is not None: return WikipediaPage(pageid=pageid, preload=preload) else: raise ValueError("Either a title or a pageid must be specified")
[ "def", "page", "(", "title", "=", "None", ",", "pageid", "=", "None", ",", "auto_suggest", "=", "True", ",", "redirect", "=", "True", ",", "preload", "=", "False", ")", ":", "if", "title", "is", "not", "None", ":", "if", "auto_suggest", ":", "results", ",", "suggestion", "=", "search", "(", "title", ",", "results", "=", "1", ",", "suggestion", "=", "True", ")", "try", ":", "title", "=", "suggestion", "or", "results", "[", "0", "]", "except", "IndexError", ":", "# if there is no suggestion or search results, the page doesn't exist", "raise", "PageError", "(", "title", ")", "return", "WikipediaPage", "(", "title", ",", "redirect", "=", "redirect", ",", "preload", "=", "preload", ")", "elif", "pageid", "is", "not", "None", ":", "return", "WikipediaPage", "(", "pageid", "=", "pageid", ",", "preload", "=", "preload", ")", "else", ":", "raise", "ValueError", "(", "\"Either a title or a pageid must be specified\"", ")" ]
38.814815
25.333333
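Typical usage (this performs live requests against the Wikipedia API; the pageid value is a placeholder):

ny = page('New York')
print(ny.title)
# or address a page directly by its numeric id, skipping the search step:
p = page(pageid=12345)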
def glymurrc_fname(): """Return the path to the configuration file. Search order: 1) current working directory 2) environ var XDG_CONFIG_HOME 3) $HOME/.config/glymur/glymurrc """ # Current directory. fname = os.path.join(os.getcwd(), 'glymurrc') if os.path.exists(fname): return fname confdir = get_configdir() if confdir is not None: fname = os.path.join(confdir, 'glymurrc') if os.path.exists(fname): return fname # didn't find a configuration file. return None
[ "def", "glymurrc_fname", "(", ")", ":", "# Current directory.", "fname", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'glymurrc'", ")", "if", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "return", "fname", "confdir", "=", "get_configdir", "(", ")", "if", "confdir", "is", "not", "None", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "confdir", ",", "'glymurrc'", ")", "if", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "return", "fname", "# didn't find a configuration file.", "return", "None" ]
24.818182
15.727273
def _nemo_accpars(self,vo,ro): """ NAME: _nemo_accpars PURPOSE: return the accpars potential parameters for use of this potential with NEMO INPUT: vo - velocity unit in km/s ro - length unit in kpc OUTPUT: accpars string HISTORY: 2014-12-18 - Written - Bovy (IAS) """ warnings.warn("NEMO's LogPot does not allow flattening in z (for some reason); therefore, flip y and z in NEMO wrt galpy; also does not allow the triaxial b parameter",galpyWarning) ampl= self._amp*vo**2. return "0,%s,%s,1.0,%s" % (ampl, self._core2*ro**2.*self._q**(2./3.), #somewhat weird gyrfalcon implementation self._q)
[ "def", "_nemo_accpars", "(", "self", ",", "vo", ",", "ro", ")", ":", "warnings", ".", "warn", "(", "\"NEMO's LogPot does not allow flattening in z (for some reason); therefore, flip y and z in NEMO wrt galpy; also does not allow the triaxial b parameter\"", ",", "galpyWarning", ")", "ampl", "=", "self", ".", "_amp", "*", "vo", "**", "2.", "return", "\"0,%s,%s,1.0,%s\"", "%", "(", "ampl", ",", "self", ".", "_core2", "*", "ro", "**", "2.", "*", "self", ".", "_q", "**", "(", "2.", "/", "3.", ")", ",", "#somewhat weird gyrfalcon implementation", "self", ".", "_q", ")" ]
26.266667
29.933333
def all(self, fields=None, include_fields=True, page=None, per_page=None, extra_params=None): """Retrieves a list of all the applications. Important: The client_secret and encryption_key attributes can only be retrieved with the read:client_keys scope. Args: fields (list of str, optional): A list of fields to include or exclude from the result (depending on include_fields). Empty to retrieve all fields. include_fields (bool, optional): True if the fields specified are to be included in the result, False otherwise. page (int): The result's page number (zero based). per_page (int, optional): The amount of entries per page. extra_params (dictionary, optional): The extra parameters to add to the request. The fields, include_fields, page and per_page values specified as parameters take precedence over the ones defined here. See: https://auth0.com/docs/api/management/v2#!/Clients/get_clients """ params = extra_params or {} params['fields'] = fields and ','.join(fields) or None params['include_fields'] = str(include_fields).lower() params['page'] = page params['per_page'] = per_page return self.client.get(self._url(), params=params)
[ "def", "all", "(", "self", ",", "fields", "=", "None", ",", "include_fields", "=", "True", ",", "page", "=", "None", ",", "per_page", "=", "None", ",", "extra_params", "=", "None", ")", ":", "params", "=", "extra_params", "or", "{", "}", "params", "[", "'fields'", "]", "=", "fields", "and", "','", ".", "join", "(", "fields", ")", "or", "None", "params", "[", "'include_fields'", "]", "=", "str", "(", "include_fields", ")", ".", "lower", "(", ")", "params", "[", "'page'", "]", "=", "page", "params", "[", "'per_page'", "]", "=", "per_page", "return", "self", ".", "client", ".", "get", "(", "self", ".", "_url", "(", ")", ",", "params", "=", "params", ")" ]
41.875
28.15625
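A hypothetical usage sketch (clients stands in for an instance of this management-API class, already configured with a domain and token):

apps = clients.all(fields=['client_id', 'name'], include_fields=True,
                   page=0, per_page=50)
for app in apps:
    print(app['client_id'], app['name'])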
def send_quick_chat(self, team_only, quick_chat): """ Sends a quick chat to the other bots. If it is QuickChats.CHAT_NONE or None it does not send a quick chat to other bots. :param team_only: either True or False, this says if the quick chat should only go to team members. :param quick_chat: The quick chat selection, available chats are defined in quick_chats.py """ if quick_chat == QuickChats.CHAT_NONE or quick_chat is None: return self.__quick_chat_func(team_only, quick_chat)
[ "def", "send_quick_chat", "(", "self", ",", "team_only", ",", "quick_chat", ")", ":", "if", "quick_chat", "==", "QuickChats", ".", "CHAT_NONE", "or", "quick_chat", "is", "None", ":", "return", "self", ".", "__quick_chat_func", "(", "team_only", ",", "quick_chat", ")" ]
55
25.2
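A hypothetical in-agent usage sketch (the QuickChats constant name is assumed from the quick_chats module referenced in the docstring):

self.send_quick_chat(team_only=False, quick_chat=QuickChats.Information_IGotIt)
# passing QuickChats.CHAT_NONE (or None) is a silent no-op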
def add_raw_code(self, string_or_list): """Add raw Gmsh code. """ if _is_string(string_or_list): self._GMSH_CODE.append(string_or_list) else: assert isinstance(string_or_list, list) for string in string_or_list: self._GMSH_CODE.append(string) return
[ "def", "add_raw_code", "(", "self", ",", "string_or_list", ")", ":", "if", "_is_string", "(", "string_or_list", ")", ":", "self", ".", "_GMSH_CODE", ".", "append", "(", "string_or_list", ")", "else", ":", "assert", "isinstance", "(", "string_or_list", ",", "list", ")", "for", "string", "in", "string_or_list", ":", "self", ".", "_GMSH_CODE", ".", "append", "(", "string", ")", "return" ]
33.2
8.4
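For example, injecting native Gmsh options verbatim (geom stands in for the geometry object exposing this method):

geom.add_raw_code('Mesh.Algorithm = 6;')
geom.add_raw_code(['Mesh.CharacteristicLengthMin = 0.1;',
                   'Mesh.CharacteristicLengthMax = 0.5;'])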
def coarsen_matrix(Z, xlevel=0, ylevel=0, method='average'):
    """
    This returns a coarsened numpy matrix.

    method can be 'average', 'maximum', or 'minimum'
    """
    # coarsen x
    if not ylevel:
        Z_coarsened = Z
    else:
        temp = []
        for z in Z:
            temp.append(coarsen_array(z, ylevel, method))
        Z_coarsened = _n.array(temp)

    # coarsen y
    if xlevel:
        Z_coarsened = Z_coarsened.transpose()
        temp = []
        for z in Z_coarsened:
            temp.append(coarsen_array(z, xlevel, method))
        Z_coarsened = _n.array(temp).transpose()

    return Z_coarsened
[ "def", "coarsen_matrix", "(", "Z", ",", "xlevel", "=", "0", ",", "ylevel", "=", "0", ",", "method", "=", "'average'", ")", ":", "# coarsen x", "if", "not", "ylevel", ":", "Z_coarsened", "=", "Z", "else", ":", "temp", "=", "[", "]", "for", "z", "in", "Z", ":", "temp", ".", "append", "(", "coarsen_array", "(", "z", ",", "ylevel", ",", "method", ")", ")", "Z_coarsened", "=", "_n", ".", "array", "(", "temp", ")", "# coarsen y", "if", "xlevel", ":", "Z_coarsened", "=", "Z_coarsened", ".", "transpose", "(", ")", "temp", "=", "[", "]", "for", "z", "in", "Z_coarsened", ":", "temp", ".", "append", "(", "coarsen_array", "(", "z", ",", "xlevel", ",", "method", ")", ")", "Z_coarsened", "=", "_n", ".", "array", "(", "temp", ")", ".", "transpose", "(", ")", "return", "Z_coarsened" ]
26.428571
21.971429
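A sketch of intended use, assuming the companion coarsen_array helper (treating level as the number of adjacent samples per bin) and the module's numpy alias _n:

Z = _n.arange(16).reshape(4, 4)
print(coarsen_matrix(Z, xlevel=2, ylevel=2, method='average'))
# under that assumption: a 2x2 matrix of 2x2-block averages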
def parse(text):
    """Try to parse into a date.

    Return:
        tuple (year, month, day) if successful; otherwise None.
    """
    try:
        ymd = text.lower().split('-')
        assert len(ymd) == 3
        year = -1 if ymd[0] in ('xx', 'xxxx') else int(ymd[0])
        month = -1 if ymd[1] == 'xx' else int(ymd[1])
        day = -1 if ymd[2] == 'xx' else int(ymd[2])
        assert not year == month == day == -1
        assert month == -1 or 1 <= month <= 12
        assert day == -1 or 1 <= day <= 31
        return (year, month, day)
    except (ValueError, AssertionError):
        return None
[ "def", "parse", "(", "text", ")", ":", "try", ":", "ymd", "=", "text", ".", "lower", "(", ")", ".", "split", "(", "'-'", ")", "assert", "len", "(", "ymd", ")", "==", "3", "year", "=", "-", "1", "if", "ymd", "[", "0", "]", "in", "(", "'xx'", ",", "'xxxx'", ")", "else", "int", "(", "ymd", "[", "0", "]", ")", "month", "=", "-", "1", "if", "ymd", "[", "1", "]", "==", "'xx'", "else", "int", "(", "ymd", "[", "1", "]", ")", "day", "=", "-", "1", "if", "ymd", "[", "2", "]", "==", "'xx'", "else", "int", "(", "ymd", "[", "2", "]", ")", "assert", "not", "year", "==", "month", "==", "day", "==", "-", "1", "assert", "month", "==", "-", "1", "or", "1", "<=", "month", "<=", "12", "assert", "day", "==", "-", "1", "or", "1", "<=", "day", "<=", "31", "return", "(", "year", ",", "month", ",", "day", ")", "except", "(", "ValueError", ",", "AssertionError", ")", ":", "return", "None" ]
36.555556
14.5
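For example:

print(parse('2023-07-14'))   # (2023, 7, 14)
print(parse('xxxx-07-xx'))   # (-1, 7, -1): unknown year and day
print(parse('2023-13-01'))   # None: month out of range
print(parse('garbage'))      # None: not a Y-M-D string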
def convert_layer_to_pil(layer, apply_icc=True, **kwargs): """Convert Layer to PIL Image.""" from PIL import Image header = layer._psd._record.header if header.color_mode == ColorMode.BITMAP: raise NotImplementedError width, height = layer.width, layer.height channels, alpha = [], None for ci, cd in zip(layer._record.channel_info, layer._channels): if ci.id in (ChannelID.USER_LAYER_MASK, ChannelID.REAL_USER_LAYER_MASK): continue channel = cd.get_data(width, height, header.depth, header.version) channel_image = _create_channel( (width, height), channel, header.depth ) if ci.id == ChannelID.TRANSPARENCY_MASK: alpha = channel_image else: channels.append(channel_image) mode = get_pil_mode(header.color_mode.name) channels = _check_channels(channels, header.color_mode) image = Image.merge(mode, channels) if mode == 'CMYK': image = image.point(lambda x: 255 - x) if alpha is not None: if mode in ('RGB', 'L'): image.putalpha(alpha) else: logger.debug('Alpha channel is not supported in %s' % (mode)) if apply_icc and 'ICC_PROFILE' in layer._psd.image_resources: image = _apply_icc( image, layer._psd.image_resources.get_data('ICC_PROFILE') ) return image
[ "def", "convert_layer_to_pil", "(", "layer", ",", "apply_icc", "=", "True", ",", "*", "*", "kwargs", ")", ":", "from", "PIL", "import", "Image", "header", "=", "layer", ".", "_psd", ".", "_record", ".", "header", "if", "header", ".", "color_mode", "==", "ColorMode", ".", "BITMAP", ":", "raise", "NotImplementedError", "width", ",", "height", "=", "layer", ".", "width", ",", "layer", ".", "height", "channels", ",", "alpha", "=", "[", "]", ",", "None", "for", "ci", ",", "cd", "in", "zip", "(", "layer", ".", "_record", ".", "channel_info", ",", "layer", ".", "_channels", ")", ":", "if", "ci", ".", "id", "in", "(", "ChannelID", ".", "USER_LAYER_MASK", ",", "ChannelID", ".", "REAL_USER_LAYER_MASK", ")", ":", "continue", "channel", "=", "cd", ".", "get_data", "(", "width", ",", "height", ",", "header", ".", "depth", ",", "header", ".", "version", ")", "channel_image", "=", "_create_channel", "(", "(", "width", ",", "height", ")", ",", "channel", ",", "header", ".", "depth", ")", "if", "ci", ".", "id", "==", "ChannelID", ".", "TRANSPARENCY_MASK", ":", "alpha", "=", "channel_image", "else", ":", "channels", ".", "append", "(", "channel_image", ")", "mode", "=", "get_pil_mode", "(", "header", ".", "color_mode", ".", "name", ")", "channels", "=", "_check_channels", "(", "channels", ",", "header", ".", "color_mode", ")", "image", "=", "Image", ".", "merge", "(", "mode", ",", "channels", ")", "if", "mode", "==", "'CMYK'", ":", "image", "=", "image", ".", "point", "(", "lambda", "x", ":", "255", "-", "x", ")", "if", "alpha", "is", "not", "None", ":", "if", "mode", "in", "(", "'RGB'", ",", "'L'", ")", ":", "image", ".", "putalpha", "(", "alpha", ")", "else", ":", "logger", ".", "debug", "(", "'Alpha channel is not supported in %s'", "%", "(", "mode", ")", ")", "if", "apply_icc", "and", "'ICC_PROFILE'", "in", "layer", ".", "_psd", ".", "image_resources", ":", "image", "=", "_apply_icc", "(", "image", ",", "layer", ".", "_psd", ".", "image_resources", ".", "get_data", "(", "'ICC_PROFILE'", ")", ")", "return", "image" ]
38.388889
15.305556
def get_immoralities(self):
    """
    Finds all the immoralities in the model
    A v-structure X -> Z <- Y is an immorality if there is no direct edge
    between X and Y.

    Returns
    -------
    set: A set of all the immoralities in the model

    Examples
    ---------
    >>> from pgmpy.base import DAG
    >>> student = DAG()
    >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'),
    ...                         ('intel', 'SAT'), ('grade', 'letter')])
    >>> student.get_immoralities()
    {('diff', 'intel')}
    """
    immoralities = set()
    for node in self.nodes():
        for parents in itertools.combinations(self.predecessors(node), 2):
            if not self.has_edge(parents[0], parents[1]) and not self.has_edge(parents[1], parents[0]):
                immoralities.add(tuple(sorted(parents)))
    return immoralities
[ "def", "get_immoralities", "(", "self", ")", ":", "immoralities", "=", "set", "(", ")", "for", "node", "in", "self", ".", "nodes", "(", ")", ":", "for", "parents", "in", "itertools", ".", "combinations", "(", "self", ".", "predecessors", "(", "node", ")", ",", "2", ")", ":", "if", "not", "self", ".", "has_edge", "(", "parents", "[", "0", "]", ",", "parents", "[", "1", "]", ")", "and", "not", "self", ".", "has_edge", "(", "parents", "[", "1", "]", ",", "parents", "[", "0", "]", ")", ":", "immoralities", ".", "add", "(", "tuple", "(", "sorted", "(", "parents", ")", ")", ")", "return", "immoralities" ]
38.333333
21.833333
def xml_extract_date(node, xpath, date_format='%d/%m/%Y'): """ :param node: the node to be queried :param xpath: the path to fetch the child node that has the wanted date """ return datetime.strptime(xml_extract_text(node, xpath), date_format)
[ "def", "xml_extract_date", "(", "node", ",", "xpath", ",", "date_format", "=", "'%d/%m/%Y'", ")", ":", "return", "datetime", ".", "strptime", "(", "xml_extract_text", "(", "node", ",", "xpath", ")", ",", "date_format", ")" ]
43
14.333333
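A hypothetical usage sketch (xml_extract_text is the companion helper used above; node and the element name are placeholders):

# given a node containing <data_sessao>14/07/2023</data_sessao>:
when = xml_extract_date(node, './/data_sessao')
print(when)  # 2023-07-14 00:00:00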
def remove_tags(self, tags, **kwargs): """ :param tags: Tags to remove from the app :type tags: array Removes the specified application name tags (aliases) from this app, so that it is no longer addressable by those aliases. The current user must be a developer of the app. """ if self._dxid is not None: return dxpy.api.app_remove_tags(self._dxid, input_params={"tags": tags}, **kwargs) else: return dxpy.api.app_remove_tags('app-' + self._name, alias=self._alias, input_params={"tags": tags}, **kwargs)
[ "def", "remove_tags", "(", "self", ",", "tags", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_dxid", "is", "not", "None", ":", "return", "dxpy", ".", "api", ".", "app_remove_tags", "(", "self", ".", "_dxid", ",", "input_params", "=", "{", "\"tags\"", ":", "tags", "}", ",", "*", "*", "kwargs", ")", "else", ":", "return", "dxpy", ".", "api", ".", "app_remove_tags", "(", "'app-'", "+", "self", ".", "_name", ",", "alias", "=", "self", ".", "_alias", ",", "input_params", "=", "{", "\"tags\"", ":", "tags", "}", ",", "*", "*", "kwargs", ")" ]
39.5
24.375
def _estimate_p_values(self): """estimate the p-values for all features """ if not self._is_fitted: raise AttributeError('GAM has not been fitted. Call fit first.') p_values = [] for term_i in range(len(self.terms)): p_values.append(self._compute_p_value(term_i)) return p_values
[ "def", "_estimate_p_values", "(", "self", ")", ":", "if", "not", "self", ".", "_is_fitted", ":", "raise", "AttributeError", "(", "'GAM has not been fitted. Call fit first.'", ")", "p_values", "=", "[", "]", "for", "term_i", "in", "range", "(", "len", "(", "self", ".", "terms", ")", ")", ":", "p_values", ".", "append", "(", "self", ".", "_compute_p_value", "(", "term_i", ")", ")", "return", "p_values" ]
31.181818
17.727273
def split_data(data, num_slice, batch_axis=0, even_split=True): """Splits an NDArray into `num_slice` slices along `batch_axis`. Usually used for data parallelism where each slices is sent to one device (i.e. GPU). Parameters ---------- data : NDArray A batch of data. num_slice : int Number of desired slices. batch_axis : int, default 0 The axis along which to slice. even_split : bool, default True Whether to force all slices to have the same number of elements. If `True`, an error will be raised when `num_slice` does not evenly divide `data.shape[batch_axis]`. Returns ------- list of NDArray Return value is a list even if `num_slice` is 1. """ size = data.shape[batch_axis] if even_split and size % num_slice != 0: raise ValueError( "data with shape %s cannot be evenly split into %d slices along axis %d. " \ "Use a batch size that's multiple of %d or set even_split=False to allow " \ "uneven partitioning of data."%( str(data.shape), num_slice, batch_axis, num_slice)) step = size // num_slice # If size < num_slice, make fewer slices if not even_split and size < num_slice: step = 1 num_slice = size if batch_axis == 0: slices = [data[i*step:(i+1)*step] if i < num_slice - 1 else data[i*step:size] for i in range(num_slice)] elif even_split: slices = ndarray.split(data, num_outputs=num_slice, axis=batch_axis) else: slices = [ndarray.slice_axis(data, batch_axis, i*step, (i+1)*step) if i < num_slice - 1 else ndarray.slice_axis(data, batch_axis, i*step, size) for i in range(num_slice)] return slices
[ "def", "split_data", "(", "data", ",", "num_slice", ",", "batch_axis", "=", "0", ",", "even_split", "=", "True", ")", ":", "size", "=", "data", ".", "shape", "[", "batch_axis", "]", "if", "even_split", "and", "size", "%", "num_slice", "!=", "0", ":", "raise", "ValueError", "(", "\"data with shape %s cannot be evenly split into %d slices along axis %d. \"", "\"Use a batch size that's multiple of %d or set even_split=False to allow \"", "\"uneven partitioning of data.\"", "%", "(", "str", "(", "data", ".", "shape", ")", ",", "num_slice", ",", "batch_axis", ",", "num_slice", ")", ")", "step", "=", "size", "//", "num_slice", "# If size < num_slice, make fewer slices", "if", "not", "even_split", "and", "size", "<", "num_slice", ":", "step", "=", "1", "num_slice", "=", "size", "if", "batch_axis", "==", "0", ":", "slices", "=", "[", "data", "[", "i", "*", "step", ":", "(", "i", "+", "1", ")", "*", "step", "]", "if", "i", "<", "num_slice", "-", "1", "else", "data", "[", "i", "*", "step", ":", "size", "]", "for", "i", "in", "range", "(", "num_slice", ")", "]", "elif", "even_split", ":", "slices", "=", "ndarray", ".", "split", "(", "data", ",", "num_outputs", "=", "num_slice", ",", "axis", "=", "batch_axis", ")", "else", ":", "slices", "=", "[", "ndarray", ".", "slice_axis", "(", "data", ",", "batch_axis", ",", "i", "*", "step", ",", "(", "i", "+", "1", ")", "*", "step", ")", "if", "i", "<", "num_slice", "-", "1", "else", "ndarray", ".", "slice_axis", "(", "data", ",", "batch_axis", ",", "i", "*", "step", ",", "size", ")", "for", "i", "in", "range", "(", "num_slice", ")", "]", "return", "slices" ]
36.469388
20.612245
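The slicing arithmetic above does not depend on MXNet; a NumPy-only sketch of the same even/uneven split behavior (split_data_np is a hypothetical stand-in for the ndarray code paths):

import numpy as np

def split_data_np(data, num_slice, batch_axis=0, even_split=True):
    size = data.shape[batch_axis]
    if even_split and size % num_slice != 0:
        raise ValueError('cannot evenly split %d into %d slices' % (size, num_slice))
    step = size // num_slice
    if not even_split and size < num_slice:
        step, num_slice = 1, size
    # The last slice absorbs the remainder when even_split=False.
    return [np.take(data, range(i * step, (i + 1) * step if i < num_slice - 1 else size),
                    axis=batch_axis)
            for i in range(num_slice)]

batch = np.arange(10).reshape(10, 1)
print([s.shape[0] for s in split_data_np(batch, 3, even_split=False)])  # [3, 3, 4]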
def _parse_current_member(self, previous_rank, values): """ Parses the column texts of a member row into a member dictionary. Parameters ---------- previous_rank: :class:`dict`[int, str] The last rank present in the rows. values: tuple[:class:`str`] A list of row contents. """ rank, name, vocation, level, joined, status = values rank = previous_rank[1] if rank == " " else rank title = None previous_rank[1] = rank m = title_regex.match(name) if m: name = m.group(1) title = m.group(2) self.members.append(GuildMember(name, rank, title, int(level), vocation, joined=joined, online=status == "online"))
[ "def", "_parse_current_member", "(", "self", ",", "previous_rank", ",", "values", ")", ":", "rank", ",", "name", ",", "vocation", ",", "level", ",", "joined", ",", "status", "=", "values", "rank", "=", "previous_rank", "[", "1", "]", "if", "rank", "==", "\" \"", "else", "rank", "title", "=", "None", "previous_rank", "[", "1", "]", "=", "rank", "m", "=", "title_regex", ".", "match", "(", "name", ")", "if", "m", ":", "name", "=", "m", ".", "group", "(", "1", ")", "title", "=", "m", ".", "group", "(", "2", ")", "self", ".", "members", ".", "append", "(", "GuildMember", "(", "name", ",", "rank", ",", "title", ",", "int", "(", "level", ")", ",", "vocation", ",", "joined", "=", "joined", ",", "online", "=", "status", "==", "\"online\"", ")", ")" ]
37.333333
16.857143
def mf_aBl(self): """ These are the expected log likelihoods (node potentials) as seen from the discrete states. """ mf_aBl = self._mf_aBl = np.zeros((self.T, self.num_states)) ids, dds, eds = self.init_dynamics_distns, self.dynamics_distns, \ self.emission_distns for idx, (d1, d2, d3) in enumerate(zip(ids, dds, eds)): mf_aBl[0,idx] = d1.expected_log_likelihood( stats=self.E_init_stats) mf_aBl[:-1,idx] += d2.expected_log_likelihood( stats=self.E_dynamics_stats) mf_aBl[:,idx] += d3.expected_log_likelihood( stats=self.E_emission_stats) mf_aBl[np.isnan(mf_aBl).any(1)] = 0. return mf_aBl
[ "def", "mf_aBl", "(", "self", ")", ":", "mf_aBl", "=", "self", ".", "_mf_aBl", "=", "np", ".", "zeros", "(", "(", "self", ".", "T", ",", "self", ".", "num_states", ")", ")", "ids", ",", "dds", ",", "eds", "=", "self", ".", "init_dynamics_distns", ",", "self", ".", "dynamics_distns", ",", "self", ".", "emission_distns", "for", "idx", ",", "(", "d1", ",", "d2", ",", "d3", ")", "in", "enumerate", "(", "zip", "(", "ids", ",", "dds", ",", "eds", ")", ")", ":", "mf_aBl", "[", "0", ",", "idx", "]", "=", "d1", ".", "expected_log_likelihood", "(", "stats", "=", "self", ".", "E_init_stats", ")", "mf_aBl", "[", ":", "-", "1", ",", "idx", "]", "+=", "d2", ".", "expected_log_likelihood", "(", "stats", "=", "self", ".", "E_dynamics_stats", ")", "mf_aBl", "[", ":", ",", "idx", "]", "+=", "d3", ".", "expected_log_likelihood", "(", "stats", "=", "self", ".", "E_emission_stats", ")", "mf_aBl", "[", "np", ".", "isnan", "(", "mf_aBl", ")", ".", "any", "(", "1", ")", "]", "=", "0.", "return", "mf_aBl" ]
39.052632
15.789474
def _parse_row(row): """Parses HTML row :param row: HTML row :return: list of values in row """ data = [] labels = HtmlTable._get_row_tag(row, "th") if labels: data += labels columns = HtmlTable._get_row_tag(row, "td") if columns: data += columns return data
[ "def", "_parse_row", "(", "row", ")", ":", "data", "=", "[", "]", "labels", "=", "HtmlTable", ".", "_get_row_tag", "(", "row", ",", "\"th\"", ")", "if", "labels", ":", "data", "+=", "labels", "columns", "=", "HtmlTable", ".", "_get_row_tag", "(", "row", ",", "\"td\"", ")", "if", "columns", ":", "data", "+=", "columns", "return", "data" ]
19.444444
20.5
def _accept_responses(self, expected_header, info_cb, timeout_ms=None): """Accepts responses until the expected header or a FAIL. Arguments: expected_header: OKAY or DATA info_cb: Optional callback for text sent from the bootloader. timeout_ms: Timeout in milliseconds to wait for each response. Raises: FastbootStateMismatch: Fastboot responded with the wrong packet type. FastbootRemoteFailure: Fastboot reported failure. FastbootInvalidResponse: Fastboot responded with an unknown packet type. Returns: OKAY packet's message. """ while True: response = self.usb.read(64, timeout_ms=timeout_ms) header = response[:4] remaining = response[4:] if header == 'INFO': info_cb(FastbootMessage(remaining, header)) elif header in self.FINAL_HEADERS: if header != expected_header: raise usb_exceptions.FastbootStateMismatch( 'Expected %s, got %s', expected_header, header) if header == 'OKAY': info_cb(FastbootMessage(remaining, header)) return remaining elif header == 'FAIL': info_cb(FastbootMessage(remaining, header)) raise usb_exceptions.FastbootRemoteFailure('FAIL: %s', remaining) else: raise usb_exceptions.FastbootInvalidResponse( 'Got unknown header %s and response %s', header, remaining)
[ "def", "_accept_responses", "(", "self", ",", "expected_header", ",", "info_cb", ",", "timeout_ms", "=", "None", ")", ":", "while", "True", ":", "response", "=", "self", ".", "usb", ".", "read", "(", "64", ",", "timeout_ms", "=", "timeout_ms", ")", "header", "=", "response", "[", ":", "4", "]", "remaining", "=", "response", "[", "4", ":", "]", "if", "header", "==", "'INFO'", ":", "info_cb", "(", "FastbootMessage", "(", "remaining", ",", "header", ")", ")", "elif", "header", "in", "self", ".", "FINAL_HEADERS", ":", "if", "header", "!=", "expected_header", ":", "raise", "usb_exceptions", ".", "FastbootStateMismatch", "(", "'Expected %s, got %s'", ",", "expected_header", ",", "header", ")", "if", "header", "==", "'OKAY'", ":", "info_cb", "(", "FastbootMessage", "(", "remaining", ",", "header", ")", ")", "return", "remaining", "elif", "header", "==", "'FAIL'", ":", "info_cb", "(", "FastbootMessage", "(", "remaining", ",", "header", ")", ")", "raise", "usb_exceptions", ".", "FastbootRemoteFailure", "(", "'FAIL: %s'", ",", "remaining", ")", "else", ":", "raise", "usb_exceptions", ".", "FastbootInvalidResponse", "(", "'Got unknown header %s and response %s'", ",", "header", ",", "remaining", ")" ]
38.083333
20.305556
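The dispatch loop can be exercised against canned packets; a minimal sketch with a list standing in for usb.read() (FINAL_HEADERS mirrors the class constant named in the code):

FINAL_HEADERS = ('OKAY', 'DATA')

def accept(packets, expected_header='OKAY'):
    for response in packets:               # stand-in for self.usb.read(64)
        header, remaining = response[:4], response[4:]
        if header == 'INFO':
            print('info:', remaining)
        elif header in FINAL_HEADERS:
            if header != expected_header:
                raise RuntimeError('expected %s, got %s' % (expected_header, header))
            return remaining
        elif header == 'FAIL':
            raise RuntimeError('FAIL: ' + remaining)
    raise RuntimeError('no final packet received')

print(accept(['INFOerasing...', 'OKAYdone']))  # info: erasing...  then  done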
def get_vlan_brief_input_request_type_get_next_request_last_rcvd_vlan_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vlan_brief = ET.Element("get_vlan_brief") config = get_vlan_brief input = ET.SubElement(get_vlan_brief, "input") request_type = ET.SubElement(input, "request-type") get_next_request = ET.SubElement(request_type, "get-next-request") last_rcvd_vlan_id = ET.SubElement(get_next_request, "last-rcvd-vlan-id") last_rcvd_vlan_id.text = kwargs.pop('last_rcvd_vlan_id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_vlan_brief_input_request_type_get_next_request_last_rcvd_vlan_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_vlan_brief", "=", "ET", ".", "Element", "(", "\"get_vlan_brief\"", ")", "config", "=", "get_vlan_brief", "input", "=", "ET", ".", "SubElement", "(", "get_vlan_brief", ",", "\"input\"", ")", "request_type", "=", "ET", ".", "SubElement", "(", "input", ",", "\"request-type\"", ")", "get_next_request", "=", "ET", ".", "SubElement", "(", "request_type", ",", "\"get-next-request\"", ")", "last_rcvd_vlan_id", "=", "ET", ".", "SubElement", "(", "get_next_request", ",", "\"last-rcvd-vlan-id\"", ")", "last_rcvd_vlan_id", ".", "text", "=", "kwargs", ".", "pop", "(", "'last_rcvd_vlan_id'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
47.857143
19.357143
def query_remote_ref(self, remote, ref): """Query remote repo about given ref. :return: ``('tag', sha)`` if ref is a tag in remote ``('branch', sha)`` if ref is branch (aka "head") in remote ``(None, ref)`` if ref does not exist in remote. This happens notably if ref if a commit sha (they can't be queried) """ out = self.log_call(['git', 'ls-remote', remote, ref], cwd=self.cwd, callwith=subprocess.check_output).strip() for sha, fullref in (l.split() for l in out.splitlines()): if fullref == 'refs/heads/' + ref: return 'branch', sha elif fullref == 'refs/tags/' + ref: return 'tag', sha elif fullref == ref and ref == 'HEAD': return 'HEAD', sha return None, ref
[ "def", "query_remote_ref", "(", "self", ",", "remote", ",", "ref", ")", ":", "out", "=", "self", ".", "log_call", "(", "[", "'git'", ",", "'ls-remote'", ",", "remote", ",", "ref", "]", ",", "cwd", "=", "self", ".", "cwd", ",", "callwith", "=", "subprocess", ".", "check_output", ")", ".", "strip", "(", ")", "for", "sha", ",", "fullref", "in", "(", "l", ".", "split", "(", ")", "for", "l", "in", "out", ".", "splitlines", "(", ")", ")", ":", "if", "fullref", "==", "'refs/heads/'", "+", "ref", ":", "return", "'branch'", ",", "sha", "elif", "fullref", "==", "'refs/tags/'", "+", "ref", ":", "return", "'tag'", ",", "sha", "elif", "fullref", "==", "ref", "and", "ref", "==", "'HEAD'", ":", "return", "'HEAD'", ",", "sha", "return", "None", ",", "ref" ]
49.333333
14.333333
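The classification logic can be tried without a live repository; the ls-remote output below is a made-up sample:

# Hypothetical output of `git ls-remote origin master`.
out = '3f1c2d4e5a6b7c8d9e0f1a2b3c4d5e6f7a8b9c0d\trefs/heads/master'

def classify_ref(out, ref):
    for sha, fullref in (line.split() for line in out.splitlines()):
        if fullref == 'refs/heads/' + ref:
            return 'branch', sha
        elif fullref == 'refs/tags/' + ref:
            return 'tag', sha
        elif fullref == ref == 'HEAD':
            return 'HEAD', sha
    return None, ref

print(classify_ref(out, 'master'))  # ('branch', '3f1c2d4e...')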
def submit_jobs(root_dir, jobs, sgeargs=None): """ Submit each of the passed jobs to the SGE server, using the passed directory as root for SGE output. - root_dir Path to output directory - jobs List of Job objects """ waiting = list(jobs) # List of jobs still to be done # Loop over the list of pending jobs, while there still are any while len(waiting) > 0: # extract submittable jobs submittable = extract_submittable_jobs(waiting) # run those jobs submit_safe_jobs(root_dir, submittable, sgeargs) # remove those from the waiting list for job in submittable: waiting.remove(job)
[ "def", "submit_jobs", "(", "root_dir", ",", "jobs", ",", "sgeargs", "=", "None", ")", ":", "waiting", "=", "list", "(", "jobs", ")", "# List of jobs still to be done", "# Loop over the list of pending jobs, while there still are any", "while", "len", "(", "waiting", ")", ">", "0", ":", "# extract submittable jobs", "submittable", "=", "extract_submittable_jobs", "(", "waiting", ")", "# run those jobs", "submit_safe_jobs", "(", "root_dir", ",", "submittable", ",", "sgeargs", ")", "# remove those from the waiting list", "for", "job", "in", "submittable", ":", "waiting", ".", "remove", "(", "job", ")" ]
40.588235
11.823529
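A toy model of the scheduling loop, assuming a job becomes submittable once all of its dependencies have completed (the Job class and the done flag are illustrative stand-ins for the SGE plumbing):

class Job:                                   # minimal stand-in
    def __init__(self, name, deps=()):
        self.name, self.deps, self.done = name, list(deps), False

def extract_submittable_jobs(waiting):
    return [j for j in waiting if all(d.done for d in j.deps)]

a = Job('a'); b = Job('b', deps=[a]); c = Job('c', deps=[a, b])
waiting = [c, b, a]
while waiting:
    batch = extract_submittable_jobs(waiting)
    for job in batch:
        job.done = True                      # stand-in for submit_safe_jobs()
        waiting.remove(job)
    print([j.name for j in batch])           # ['a'], then ['b'], then ['c']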
def OnFindToolbarToggle(self, event): """Search toolbar toggle event handler""" self.main_window.find_toolbar.SetGripperVisible(True) find_toolbar_info = self.main_window._mgr.GetPane("find_toolbar") self._toggle_pane(find_toolbar_info) event.Skip()
[ "def", "OnFindToolbarToggle", "(", "self", ",", "event", ")", ":", "self", ".", "main_window", ".", "find_toolbar", ".", "SetGripperVisible", "(", "True", ")", "find_toolbar_info", "=", "self", ".", "main_window", ".", "_mgr", ".", "GetPane", "(", "\"find_toolbar\"", ")", "self", ".", "_toggle_pane", "(", "find_toolbar_info", ")", "event", ".", "Skip", "(", ")" ]
28.4
24.1
def times_csv(path, times, annotations=None, delimiter=',', fmt='%0.3f'): r"""Save time steps as in CSV format. This can be used to store the output of a beat-tracker or segmentation algorithm. If only `times` are provided, the file will contain each value of `times` on a row:: times[0]\n times[1]\n times[2]\n ... If `annotations` are also provided, the file will contain delimiter-separated values:: times[0],annotations[0]\n times[1],annotations[1]\n times[2],annotations[2]\n ... Parameters ---------- path : string path to save the output CSV file times : list-like of floats list of frame numbers for beat events annotations : None or list-like optional annotations for each time step delimiter : str character to separate fields fmt : str format-string for rendering time Raises ------ ParameterError if `annotations` is not `None` and length does not match `times` Examples -------- Write beat-tracker time to CSV >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> tempo, beats = librosa.beat.beat_track(y, sr=sr, units='time') >>> librosa.output.times_csv('beat_times.csv', beats) """ if annotations is not None and len(annotations) != len(times): raise ParameterError('len(annotations) != len(times)') with open(path, 'w') as output_file: writer = csv.writer(output_file, delimiter=delimiter) if annotations is None: for t in times: writer.writerow([fmt % t]) else: for t, lab in zip(times, annotations): writer.writerow([(fmt % t), lab])
[ "def", "times_csv", "(", "path", ",", "times", ",", "annotations", "=", "None", ",", "delimiter", "=", "','", ",", "fmt", "=", "'%0.3f'", ")", ":", "if", "annotations", "is", "not", "None", "and", "len", "(", "annotations", ")", "!=", "len", "(", "times", ")", ":", "raise", "ParameterError", "(", "'len(annotations) != len(times)'", ")", "with", "open", "(", "path", ",", "'w'", ")", "as", "output_file", ":", "writer", "=", "csv", ".", "writer", "(", "output_file", ",", "delimiter", "=", "delimiter", ")", "if", "annotations", "is", "None", ":", "for", "t", "in", "times", ":", "writer", ".", "writerow", "(", "[", "fmt", "%", "t", "]", ")", "else", ":", "for", "t", ",", "lab", "in", "zip", "(", "times", ",", "annotations", ")", ":", "writer", ".", "writerow", "(", "[", "(", "fmt", "%", "t", ")", ",", "lab", "]", ")" ]
26.507692
22.138462
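The annotated branch reduces to a few lines of stdlib csv; a sketch with made-up beat times (the output file name is arbitrary):

import csv

times = [0.5, 1.0, 1.5]
annotations = ['down', 'up', 'down']
with open('beat_times.csv', 'w') as fh:
    writer = csv.writer(fh, delimiter=',')
    for t, lab in zip(times, annotations):
        writer.writerow(['%0.3f' % t, lab])
# beat_times.csv now holds: 0.500,down / 1.000,up / 1.500,down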
def message(self, data): """Function to display messages to the user """ msg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_CLOSE, data) msg.set_resizable(1) msg.set_title(self.dialog_title) self.img.set_from_file(self.sun_icon) msg.set_image(self.img) msg.show_all() msg.run() msg.destroy()
[ "def", "message", "(", "self", ",", "data", ")", ":", "msg", "=", "gtk", ".", "MessageDialog", "(", "None", ",", "gtk", ".", "DIALOG_MODAL", ",", "gtk", ".", "MESSAGE_INFO", ",", "gtk", ".", "BUTTONS_CLOSE", ",", "data", ")", "msg", ".", "set_resizable", "(", "1", ")", "msg", ".", "set_title", "(", "self", ".", "dialog_title", ")", "self", ".", "img", ".", "set_from_file", "(", "self", ".", "sun_icon", ")", "msg", ".", "set_image", "(", "self", ".", "img", ")", "msg", ".", "show_all", "(", ")", "msg", ".", "run", "(", ")", "msg", ".", "destroy", "(", ")" ]
34.916667
12.583333
def delete(self, ids):
        """
        Method to delete network-ipv4 objects by their ids

        :param ids: Identifiers of network-ipv4 objects
        :return: None
        """
        url = build_uri_with_ids('api/v3/networkv4/%s/', ids)
        return super(ApiNetworkIPv4, self).delete(url)
[ "def", "delete", "(", "self", ",", "ids", ")", ":", "url", "=", "build_uri_with_ids", "(", "'api/v3/networkv4/%s/'", ",", "ids", ")", "return", "super", "(", "ApiNetworkIPv4", ",", "self", ")", ".", "delete", "(", "url", ")" ]
28.1
17.3
def reset(self): """ Resets the value of config item to its default value. """ old_value = self._value old_raw_str_value = self.raw_str_value self._value = not_set self.raw_str_value = not_set new_value = self._value if old_value is not_set: # Nothing to report return if self.section: self.section.dispatch_event( self.section.hooks.item_value_changed, item=self, old_value=old_value, new_value=new_value, old_raw_str_value=old_raw_str_value, new_raw_str_value=self.raw_str_value, )
[ "def", "reset", "(", "self", ")", ":", "old_value", "=", "self", ".", "_value", "old_raw_str_value", "=", "self", ".", "raw_str_value", "self", ".", "_value", "=", "not_set", "self", ".", "raw_str_value", "=", "not_set", "new_value", "=", "self", ".", "_value", "if", "old_value", "is", "not_set", ":", "# Nothing to report", "return", "if", "self", ".", "section", ":", "self", ".", "section", ".", "dispatch_event", "(", "self", ".", "section", ".", "hooks", ".", "item_value_changed", ",", "item", "=", "self", ",", "old_value", "=", "old_value", ",", "new_value", "=", "new_value", ",", "old_raw_str_value", "=", "old_raw_str_value", ",", "new_raw_str_value", "=", "self", ".", "raw_str_value", ",", ")" ]
27.48
15.48
def toggleCollapseBefore( self ): """ Collapses the splitter before this handle. """ if ( self.isCollapsed() ): self.uncollapse() else: self.collapse( XSplitterHandle.CollapseDirection.Before )
[ "def", "toggleCollapseBefore", "(", "self", ")", ":", "if", "(", "self", ".", "isCollapsed", "(", ")", ")", ":", "self", ".", "uncollapse", "(", ")", "else", ":", "self", ".", "collapse", "(", "XSplitterHandle", ".", "CollapseDirection", ".", "Before", ")" ]
31.25
11.25
def readcols(infile, cols=[0, 1, 2, 3], hms=False):
    """ Read the columns from an ASCII file as numpy arrays.

    Parameters
    ----------
    infile : str
        Filename of ASCII file with array data as columns.

    cols : list of int
        List of 0-indexed column numbers for columns to be turned into
        numpy arrays (default: [0, 1, 2, 3]).

    hms : bool
        If True, keep the column values as strings (e.g. for sexagesimal
        coordinates) instead of converting them to floats.

    Returns
    -------
    outarr : list of numpy arrays
        Simple list of numpy arrays in the order as specified in the
        'cols' parameter.

    """
    fin = open(infile,'r')
    outarr = []
    for l in fin.readlines():
        l = l.strip()
        if len(l) == 0 or len(l.split()) < len(cols) or (len(l) > 0 and l[0] == '#' or (l.find("INDEF") > -1)):
            continue
        # Collapse runs of whitespace down to single spaces.
        for i in range(10):
            lnew = l.replace("  "," ")
            if lnew == l:
                break
            else:
                l = lnew
        lspl = lnew.split(" ")

        if len(outarr) == 0:
            for c in range(len(cols)):
                outarr.append([])

        for c,n in zip(cols,list(range(len(cols)))):
            if not hms:
                val = float(lspl[c])
            else:
                val = lspl[c]
            outarr[n].append(val)
    fin.close()
    for n in range(len(cols)):
        outarr[n] = np.array(outarr[n])
    return outarr
[ "def", "readcols", "(", "infile", ",", "cols", "=", "[", "0", ",", "1", ",", "2", ",", "3", "]", ",", "hms", "=", "False", ")", ":", "fin", "=", "open", "(", "infile", ",", "'r'", ")", "outarr", "=", "[", "]", "for", "l", "in", "fin", ".", "readlines", "(", ")", ":", "l", "=", "l", ".", "strip", "(", ")", "if", "len", "(", "l", ")", "==", "0", "or", "len", "(", "l", ".", "split", "(", ")", ")", "<", "len", "(", "cols", ")", "or", "(", "len", "(", "l", ")", ">", "0", "and", "l", "[", "0", "]", "==", "'#'", "or", "(", "l", ".", "find", "(", "\"INDEF\"", ")", ">", "-", "1", ")", ")", ":", "continue", "for", "i", "in", "range", "(", "10", ")", ":", "lnew", "=", "l", ".", "replace", "(", "\" \"", ",", "\" \"", ")", "if", "lnew", "==", "l", ":", "break", "else", ":", "l", "=", "lnew", "lspl", "=", "lnew", ".", "split", "(", "\" \"", ")", "if", "len", "(", "outarr", ")", "==", "0", ":", "for", "c", "in", "range", "(", "len", "(", "cols", ")", ")", ":", "outarr", ".", "append", "(", "[", "]", ")", "for", "c", ",", "n", "in", "zip", "(", "cols", ",", "list", "(", "range", "(", "len", "(", "cols", ")", ")", ")", ")", ":", "if", "not", "hms", ":", "val", "=", "float", "(", "lspl", "[", "c", "]", ")", "else", ":", "val", "=", "lspl", "[", "c", "]", "outarr", "[", "n", "]", ".", "append", "(", "val", ")", "fin", ".", "close", "(", ")", "for", "n", "in", "range", "(", "len", "(", "cols", ")", ")", ":", "outarr", "[", "n", "]", "=", "np", ".", "array", "(", "outarr", "[", "n", "]", ")", "return", "outarr" ]
28.068182
21.386364
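A simplified re-run of the same filtering against a throwaway input file; comment lines, short lines and INDEF rows are skipped as above (whitespace collapsing is reduced to str.split for brevity):

import numpy as np

with open('coords.txt', 'w') as fh:          # throwaway sample input
    fh.write('# x y flux sky\n'
             '1.0 2.0 10.5 0.1\n'
             '3.0 4.0 INDEF 0.2\n'
             '5.0 6.0 11.2 0.3\n')

cols = [0, 1, 2, 3]
outarr = [[] for _ in cols]
for line in open('coords.txt'):
    line = line.strip()
    if not line or line.startswith('#') or 'INDEF' in line or len(line.split()) < len(cols):
        continue
    parts = line.split()
    for dest, c in zip(outarr, cols):
        dest.append(float(parts[c]))
arrays = [np.array(col) for col in outarr]
print(arrays[2])  # [10.5 11.2] -- the INDEF row was dropped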
def _format_object(obj, format_type=None): """Depending on settings calls either `format_keys` or `format_field_names`""" if json_api_settings.FORMAT_KEYS is not None: return format_keys(obj, format_type) return format_field_names(obj, format_type)
[ "def", "_format_object", "(", "obj", ",", "format_type", "=", "None", ")", ":", "if", "json_api_settings", ".", "FORMAT_KEYS", "is", "not", "None", ":", "return", "format_keys", "(", "obj", ",", "format_type", ")", "return", "format_field_names", "(", "obj", ",", "format_type", ")" ]
37.714286
14.571429
def bundle(self, app, results_dir): """Create a self-contained application bundle. The bundle will contain the target classes, dependencies and resources. """ assert(isinstance(app, BundleCreate.App)) bundle_dir = self.get_bundle_dir(app.id, results_dir) self.context.log.debug('creating {}'.format(os.path.relpath(bundle_dir, get_buildroot()))) safe_mkdir(bundle_dir, clean=True) classpath = OrderedSet() # Create symlinks for both internal and external dependencies under `lib_dir`. This is # only needed when not creating a deployjar lib_dir = os.path.join(bundle_dir, self.LIBS_DIR) if not app.deployjar: os.mkdir(lib_dir) consolidated_classpath = self.context.products.get_data('consolidated_classpath') classpath.update(ClasspathProducts.create_canonical_classpath( consolidated_classpath, app.target.closure(bfs=True, **self._target_closure_kwargs), lib_dir, internal_classpath_only=False, excludes=app.binary.deploy_excludes, )) bundle_jar = os.path.join(bundle_dir, '{}.jar'.format(app.binary.basename)) with self.monolithic_jar(app.binary, bundle_jar, manifest_classpath=classpath) as jar: self.add_main_manifest_entry(jar, app.binary) # Make classpath complete by adding the monolithic jar. classpath.update([jar.path]) if app.binary.shading_rules: for jar_path in classpath: # In case `jar_path` is a symlink, this is still safe, shaded jar will overwrite jar_path, # original file `jar_path` linked to remains untouched. # TODO run in parallel to speed up self.shade_jar(shading_rules=app.binary.shading_rules, jar_path=jar_path) self.symlink_bundles(app, bundle_dir) return bundle_dir
[ "def", "bundle", "(", "self", ",", "app", ",", "results_dir", ")", ":", "assert", "(", "isinstance", "(", "app", ",", "BundleCreate", ".", "App", ")", ")", "bundle_dir", "=", "self", ".", "get_bundle_dir", "(", "app", ".", "id", ",", "results_dir", ")", "self", ".", "context", ".", "log", ".", "debug", "(", "'creating {}'", ".", "format", "(", "os", ".", "path", ".", "relpath", "(", "bundle_dir", ",", "get_buildroot", "(", ")", ")", ")", ")", "safe_mkdir", "(", "bundle_dir", ",", "clean", "=", "True", ")", "classpath", "=", "OrderedSet", "(", ")", "# Create symlinks for both internal and external dependencies under `lib_dir`. This is", "# only needed when not creating a deployjar", "lib_dir", "=", "os", ".", "path", ".", "join", "(", "bundle_dir", ",", "self", ".", "LIBS_DIR", ")", "if", "not", "app", ".", "deployjar", ":", "os", ".", "mkdir", "(", "lib_dir", ")", "consolidated_classpath", "=", "self", ".", "context", ".", "products", ".", "get_data", "(", "'consolidated_classpath'", ")", "classpath", ".", "update", "(", "ClasspathProducts", ".", "create_canonical_classpath", "(", "consolidated_classpath", ",", "app", ".", "target", ".", "closure", "(", "bfs", "=", "True", ",", "*", "*", "self", ".", "_target_closure_kwargs", ")", ",", "lib_dir", ",", "internal_classpath_only", "=", "False", ",", "excludes", "=", "app", ".", "binary", ".", "deploy_excludes", ",", ")", ")", "bundle_jar", "=", "os", ".", "path", ".", "join", "(", "bundle_dir", ",", "'{}.jar'", ".", "format", "(", "app", ".", "binary", ".", "basename", ")", ")", "with", "self", ".", "monolithic_jar", "(", "app", ".", "binary", ",", "bundle_jar", ",", "manifest_classpath", "=", "classpath", ")", "as", "jar", ":", "self", ".", "add_main_manifest_entry", "(", "jar", ",", "app", ".", "binary", ")", "# Make classpath complete by adding the monolithic jar.", "classpath", ".", "update", "(", "[", "jar", ".", "path", "]", ")", "if", "app", ".", "binary", ".", "shading_rules", ":", "for", "jar_path", "in", "classpath", ":", "# In case `jar_path` is a symlink, this is still safe, shaded jar will overwrite jar_path,", "# original file `jar_path` linked to remains untouched.", "# TODO run in parallel to speed up", "self", ".", "shade_jar", "(", "shading_rules", "=", "app", ".", "binary", ".", "shading_rules", ",", "jar_path", "=", "jar_path", ")", "self", ".", "symlink_bundles", "(", "app", ",", "bundle_dir", ")", "return", "bundle_dir" ]
38.695652
23.5
def __process_by_ccore(self): """! @brief Performs cluster analysis using CCORE (C/C++ part of pyclustering library). """ (self.__clusters, self.__noise, self.__ordering, self.__eps, objects_indexes, objects_core_distances, objects_reachability_distances) = \ wrapper.optics(self.__sample_pointer, self.__eps, self.__minpts, self.__amount_clusters, self.__data_type) self.__optics_objects = [] for i in range(len(objects_indexes)): if objects_core_distances[i] < 0.0: objects_core_distances[i] = None if objects_reachability_distances[i] < 0.0: objects_reachability_distances[i] = None optics_object = optics_descriptor(objects_indexes[i], objects_core_distances[i], objects_reachability_distances[i]) optics_object.processed = True self.__optics_objects.append(optics_object)
[ "def", "__process_by_ccore", "(", "self", ")", ":", "(", "self", ".", "__clusters", ",", "self", ".", "__noise", ",", "self", ".", "__ordering", ",", "self", ".", "__eps", ",", "objects_indexes", ",", "objects_core_distances", ",", "objects_reachability_distances", ")", "=", "wrapper", ".", "optics", "(", "self", ".", "__sample_pointer", ",", "self", ".", "__eps", ",", "self", ".", "__minpts", ",", "self", ".", "__amount_clusters", ",", "self", ".", "__data_type", ")", "self", ".", "__optics_objects", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "objects_indexes", ")", ")", ":", "if", "objects_core_distances", "[", "i", "]", "<", "0.0", ":", "objects_core_distances", "[", "i", "]", "=", "None", "if", "objects_reachability_distances", "[", "i", "]", "<", "0.0", ":", "objects_reachability_distances", "[", "i", "]", "=", "None", "optics_object", "=", "optics_descriptor", "(", "objects_indexes", "[", "i", "]", ",", "objects_core_distances", "[", "i", "]", ",", "objects_reachability_distances", "[", "i", "]", ")", "optics_object", ".", "processed", "=", "True", "self", ".", "__optics_objects", ".", "append", "(", "optics_object", ")" ]
42.863636
28
def spidex_human(variant_obj): """Translate SPIDEX annotation to human readable string.""" if variant_obj.get('spidex') is None: return 'not_reported' elif abs(variant_obj['spidex']) < SPIDEX_HUMAN['low']['pos'][1]: return 'low' elif abs(variant_obj['spidex']) < SPIDEX_HUMAN['medium']['pos'][1]: return 'medium' else: return 'high'
[ "def", "spidex_human", "(", "variant_obj", ")", ":", "if", "variant_obj", ".", "get", "(", "'spidex'", ")", "is", "None", ":", "return", "'not_reported'", "elif", "abs", "(", "variant_obj", "[", "'spidex'", "]", ")", "<", "SPIDEX_HUMAN", "[", "'low'", "]", "[", "'pos'", "]", "[", "1", "]", ":", "return", "'low'", "elif", "abs", "(", "variant_obj", "[", "'spidex'", "]", ")", "<", "SPIDEX_HUMAN", "[", "'medium'", "]", "[", "'pos'", "]", "[", "1", "]", ":", "return", "'medium'", "else", ":", "return", "'high'" ]
37.5
16.8
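The mapping depends only on the SPIDEX_HUMAN cutoffs; a sketch with hypothetical thresholds (the real values live in scout's constants module):

# Hypothetical cutoffs standing in for scout's SPIDEX_HUMAN constant.
SPIDEX_HUMAN = {'low': {'pos': (0, 1)}, 'medium': {'pos': (1, 2)}}

def spidex_human(variant_obj):
    if variant_obj.get('spidex') is None:
        return 'not_reported'
    elif abs(variant_obj['spidex']) < SPIDEX_HUMAN['low']['pos'][1]:
        return 'low'
    elif abs(variant_obj['spidex']) < SPIDEX_HUMAN['medium']['pos'][1]:
        return 'medium'
    return 'high'

for score in (None, 0.4, -1.3, 2.5):
    print(score, spidex_human({'spidex': score}))
# None not_reported / 0.4 low / -1.3 medium / 2.5 high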
def connect(self, node="", rpcuser="", rpcpassword="", **kwargs): """ Connect to blockchain network (internal use only) """ if not node: if "node" in self.config: node = self.config["node"] else: raise ValueError("A Blockchain node needs to be provided!") if not rpcuser and "rpcuser" in self.config: rpcuser = self.config["rpcuser"] if not rpcpassword and "rpcpassword" in self.config: rpcpassword = self.config["rpcpassword"] self.rpc = self.rpc_class(node, rpcuser, rpcpassword, **kwargs)
[ "def", "connect", "(", "self", ",", "node", "=", "\"\"", ",", "rpcuser", "=", "\"\"", ",", "rpcpassword", "=", "\"\"", ",", "*", "*", "kwargs", ")", ":", "if", "not", "node", ":", "if", "\"node\"", "in", "self", ".", "config", ":", "node", "=", "self", ".", "config", "[", "\"node\"", "]", "else", ":", "raise", "ValueError", "(", "\"A Blockchain node needs to be provided!\"", ")", "if", "not", "rpcuser", "and", "\"rpcuser\"", "in", "self", ".", "config", ":", "rpcuser", "=", "self", ".", "config", "[", "\"rpcuser\"", "]", "if", "not", "rpcpassword", "and", "\"rpcpassword\"", "in", "self", ".", "config", ":", "rpcpassword", "=", "self", ".", "config", "[", "\"rpcpassword\"", "]", "self", ".", "rpc", "=", "self", ".", "rpc_class", "(", "node", ",", "rpcuser", ",", "rpcpassword", ",", "*", "*", "kwargs", ")" ]
37.9375
19.1875
def getCompressMod(self): ''' getCompressMod - Return the module used for compression on this field @return <module> - The module for compression ''' if self.compressMode == COMPRESS_MODE_ZLIB: return zlib if self.compressMode == COMPRESS_MODE_BZ2: return bz2 if self.compressMode == COMPRESS_MODE_LZMA: # Since lzma is not provided by python core in python2, search out some common alternatives. # Throw exception if we can find no lzma implementation. global _lzmaMod if _lzmaMod is not None: return _lzmaMod try: import lzma _lzmaMod = lzma return _lzmaMod except: # Python2 does not provide "lzma" module, search for common alternatives try: from backports import lzma _lzmaMod = lzma return _lzmaMod except: pass try: import lzmaffi as lzma _lzmaMod = lzma return _lzmaMod except: pass raise ImportError("Requested compress mode is lzma and could not find a module providing lzma support. Tried: 'lzma', 'backports.lzma', 'lzmaffi' and none of these were available. Please install one of these, or to use an unlisted implementation, set IndexedRedis.fields.compressed._lzmaMod to the module (must implement standard python compression interface)")
[ "def", "getCompressMod", "(", "self", ")", ":", "if", "self", ".", "compressMode", "==", "COMPRESS_MODE_ZLIB", ":", "return", "zlib", "if", "self", ".", "compressMode", "==", "COMPRESS_MODE_BZ2", ":", "return", "bz2", "if", "self", ".", "compressMode", "==", "COMPRESS_MODE_LZMA", ":", "# Since lzma is not provided by python core in python2, search out some common alternatives.", "# Throw exception if we can find no lzma implementation.", "global", "_lzmaMod", "if", "_lzmaMod", "is", "not", "None", ":", "return", "_lzmaMod", "try", ":", "import", "lzma", "_lzmaMod", "=", "lzma", "return", "_lzmaMod", "except", ":", "# Python2 does not provide \"lzma\" module, search for common alternatives", "try", ":", "from", "backports", "import", "lzma", "_lzmaMod", "=", "lzma", "return", "_lzmaMod", "except", ":", "pass", "try", ":", "import", "lzmaffi", "as", "lzma", "_lzmaMod", "=", "lzma", "return", "_lzmaMod", "except", ":", "pass", "raise", "ImportError", "(", "\"Requested compress mode is lzma and could not find a module providing lzma support. Tried: 'lzma', 'backports.lzma', 'lzmaffi' and none of these were available. Please install one of these, or to use an unlisted implementation, set IndexedRedis.fields.compressed._lzmaMod to the module (must implement standard python compression interface)\"", ")" ]
35.428571
32.571429
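The python2 fallback is a cascading-import pattern; a compact sketch of the same idea (the module names follow the ones tried above):

_lzmaMod = None

def _find_lzma():
    # Return the first importable lzma implementation, caching the result.
    global _lzmaMod
    if _lzmaMod is not None:
        return _lzmaMod
    for name in ('lzma', 'backports.lzma', 'lzmaffi'):
        try:
            _lzmaMod = __import__(name, fromlist=['lzma'])
            return _lzmaMod
        except ImportError:
            pass
    raise ImportError('no lzma implementation found')

print(_find_lzma().__name__)  # 'lzma' on python3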
def set_bookmarks(self, bookmarks): """ Store the sequence of bookmarks `bookmarks`. Causes signals to be fired to reflect the changes. .. note:: This should normally not be used. It does not mitigate the race condition between clients concurrently modifying the bookmarks and may lead to data loss. Use :meth:`add_bookmark`, :meth:`discard_bookmark` and :meth:`update_bookmark` instead. This method still has use-cases (modifying the bookmarklist at large, e.g. by syncing the remote store with local data). """ with (yield from self._lock): yield from self._set_bookmarks(bookmarks) self._diff_emit_update(bookmarks)
[ "def", "set_bookmarks", "(", "self", ",", "bookmarks", ")", ":", "with", "(", "yield", "from", "self", ".", "_lock", ")", ":", "yield", "from", "self", ".", "_set_bookmarks", "(", "bookmarks", ")", "self", ".", "_diff_emit_update", "(", "bookmarks", ")" ]
44.5
17.5
def pass_defaults(func): """Decorator that returns a function named wrapper. When invoked, wrapper invokes func with default kwargs appended. Parameters ---------- func : callable The function to append the default kwargs to """ @functools.wraps(func) def wrapper(self, *args, **kwargs): merged = {} merged.update(self.defaults) merged.update(kwargs) return func(self, *args, **merged) return wrapper
[ "def", "pass_defaults", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "merged", "=", "{", "}", "merged", ".", "update", "(", "self", ".", "defaults", ")", "merged", ".", "update", "(", "kwargs", ")", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "merged", ")", "return", "wrapper" ]
27.176471
16.705882
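Usage sketch: the decorator lets call-site kwargs override instance defaults (the Client class is illustrative):

import functools

def pass_defaults(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        merged = {}
        merged.update(self.defaults)
        merged.update(kwargs)                # call-site kwargs win
        return func(self, *args, **merged)
    return wrapper

class Client:                                # illustrative consumer
    defaults = {'timeout': 30, 'retries': 3}

    @pass_defaults
    def request(self, url, **kwargs):
        return url, kwargs

print(Client().request('/ping'))             # ('/ping', {'timeout': 30, 'retries': 3})
print(Client().request('/ping', timeout=5))  # ('/ping', {'timeout': 5, 'retries': 3})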
def plot_net(fignum): """ Draws circle and tick marks for equal area projection. """ # make the perimeter plt.figure(num=fignum,) plt.clf() plt.axis("off") Dcirc = np.arange(0, 361.) Icirc = np.zeros(361, 'f') Xcirc, Ycirc = [], [] for k in range(361): XY = pmag.dimap(Dcirc[k], Icirc[k]) Xcirc.append(XY[0]) Ycirc.append(XY[1]) plt.plot(Xcirc, Ycirc, 'k') # put on the tick marks Xsym, Ysym = [], [] for I in range(10, 100, 10): XY = pmag.dimap(0., I) Xsym.append(XY[0]) Ysym.append(XY[1]) plt.plot(Xsym, Ysym, 'k+') Xsym, Ysym = [], [] for I in range(10, 90, 10): XY = pmag.dimap(90., I) Xsym.append(XY[0]) Ysym.append(XY[1]) plt.plot(Xsym, Ysym, 'k+') Xsym, Ysym = [], [] for I in range(10, 90, 10): XY = pmag.dimap(180., I) Xsym.append(XY[0]) Ysym.append(XY[1]) plt.plot(Xsym, Ysym, 'k+') Xsym, Ysym = [], [] for I in range(10, 90, 10): XY = pmag.dimap(270., I) Xsym.append(XY[0]) Ysym.append(XY[1]) plt.plot(Xsym, Ysym, 'k+') for D in range(0, 360, 10): Xtick, Ytick = [], [] for I in range(4): XY = pmag.dimap(D, I) Xtick.append(XY[0]) Ytick.append(XY[1]) plt.plot(Xtick, Ytick, 'k') plt.axis("equal") plt.axis((-1.05, 1.05, -1.05, 1.05))
[ "def", "plot_net", "(", "fignum", ")", ":", "# make the perimeter", "plt", ".", "figure", "(", "num", "=", "fignum", ",", ")", "plt", ".", "clf", "(", ")", "plt", ".", "axis", "(", "\"off\"", ")", "Dcirc", "=", "np", ".", "arange", "(", "0", ",", "361.", ")", "Icirc", "=", "np", ".", "zeros", "(", "361", ",", "'f'", ")", "Xcirc", ",", "Ycirc", "=", "[", "]", ",", "[", "]", "for", "k", "in", "range", "(", "361", ")", ":", "XY", "=", "pmag", ".", "dimap", "(", "Dcirc", "[", "k", "]", ",", "Icirc", "[", "k", "]", ")", "Xcirc", ".", "append", "(", "XY", "[", "0", "]", ")", "Ycirc", ".", "append", "(", "XY", "[", "1", "]", ")", "plt", ".", "plot", "(", "Xcirc", ",", "Ycirc", ",", "'k'", ")", "# put on the tick marks", "Xsym", ",", "Ysym", "=", "[", "]", ",", "[", "]", "for", "I", "in", "range", "(", "10", ",", "100", ",", "10", ")", ":", "XY", "=", "pmag", ".", "dimap", "(", "0.", ",", "I", ")", "Xsym", ".", "append", "(", "XY", "[", "0", "]", ")", "Ysym", ".", "append", "(", "XY", "[", "1", "]", ")", "plt", ".", "plot", "(", "Xsym", ",", "Ysym", ",", "'k+'", ")", "Xsym", ",", "Ysym", "=", "[", "]", ",", "[", "]", "for", "I", "in", "range", "(", "10", ",", "90", ",", "10", ")", ":", "XY", "=", "pmag", ".", "dimap", "(", "90.", ",", "I", ")", "Xsym", ".", "append", "(", "XY", "[", "0", "]", ")", "Ysym", ".", "append", "(", "XY", "[", "1", "]", ")", "plt", ".", "plot", "(", "Xsym", ",", "Ysym", ",", "'k+'", ")", "Xsym", ",", "Ysym", "=", "[", "]", ",", "[", "]", "for", "I", "in", "range", "(", "10", ",", "90", ",", "10", ")", ":", "XY", "=", "pmag", ".", "dimap", "(", "180.", ",", "I", ")", "Xsym", ".", "append", "(", "XY", "[", "0", "]", ")", "Ysym", ".", "append", "(", "XY", "[", "1", "]", ")", "plt", ".", "plot", "(", "Xsym", ",", "Ysym", ",", "'k+'", ")", "Xsym", ",", "Ysym", "=", "[", "]", ",", "[", "]", "for", "I", "in", "range", "(", "10", ",", "90", ",", "10", ")", ":", "XY", "=", "pmag", ".", "dimap", "(", "270.", ",", "I", ")", "Xsym", ".", "append", "(", "XY", "[", "0", "]", ")", "Ysym", ".", "append", "(", "XY", "[", "1", "]", ")", "plt", ".", "plot", "(", "Xsym", ",", "Ysym", ",", "'k+'", ")", "for", "D", "in", "range", "(", "0", ",", "360", ",", "10", ")", ":", "Xtick", ",", "Ytick", "=", "[", "]", ",", "[", "]", "for", "I", "in", "range", "(", "4", ")", ":", "XY", "=", "pmag", ".", "dimap", "(", "D", ",", "I", ")", "Xtick", ".", "append", "(", "XY", "[", "0", "]", ")", "Ytick", ".", "append", "(", "XY", "[", "1", "]", ")", "plt", ".", "plot", "(", "Xtick", ",", "Ytick", ",", "'k'", ")", "plt", ".", "axis", "(", "\"equal\"", ")", "plt", ".", "axis", "(", "(", "-", "1.05", ",", "1.05", ",", "-", "1.05", ",", "1.05", ")", ")" ]
26.519231
13.019231
def get_query_batch_request(self, batch_id, job_id=None):
        """ Fetch the request sent for the batch. Note: this should only be
        used for query batches.
        """
        if not job_id:
            job_id = self.lookup_job_id(batch_id)

        url = self.endpoint + "/job/{}/batch/{}/request".format(job_id, batch_id)
        resp = requests.get(url, headers=self.headers())
        self.check_status(resp)
        return resp.text
[ "def", "get_query_batch_request", "(", "self", ",", "batch_id", ",", "job_id", "=", "None", ")", ":", "if", "not", "job_id", ":", "job_id", "=", "self", ".", "lookup_job_id", "(", "batch_id", ")", "url", "=", "self", ".", "endpoint", "+", "\"/job/{}/batch/{}/request\"", ".", "format", "(", "job_id", ",", "batch_id", ")", "resp", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "headers", "(", ")", ")", "self", ".", "check_status", "(", "resp", ")", "return", "resp", ".", "text" ]
45.888889
18.444444
def _fetch(self): """forces update of a local cached copy of the real object (regardless of the preference setting self.cache)""" if not self._obcache_current: from distob import engine ax = self._distaxis self._obcache = concatenate([ra._ob for ra in self._subarrays], ax) # let subarray obcaches and main obcache be views on same memory: for i in range(self._n): ix = [slice(None)] * self.ndim ix[ax] = slice(self._si[i], self._si[i+1]) self._subarrays[i]._obcache = self._obcache[tuple(ix)] self._obcache_current = True # now prefer local processing: self.__engine_affinity__ = ( engine.eid, self.__engine_affinity__[1])
[ "def", "_fetch", "(", "self", ")", ":", "if", "not", "self", ".", "_obcache_current", ":", "from", "distob", "import", "engine", "ax", "=", "self", ".", "_distaxis", "self", ".", "_obcache", "=", "concatenate", "(", "[", "ra", ".", "_ob", "for", "ra", "in", "self", ".", "_subarrays", "]", ",", "ax", ")", "# let subarray obcaches and main obcache be views on same memory:", "for", "i", "in", "range", "(", "self", ".", "_n", ")", ":", "ix", "=", "[", "slice", "(", "None", ")", "]", "*", "self", ".", "ndim", "ix", "[", "ax", "]", "=", "slice", "(", "self", ".", "_si", "[", "i", "]", ",", "self", ".", "_si", "[", "i", "+", "1", "]", ")", "self", ".", "_subarrays", "[", "i", "]", ".", "_obcache", "=", "self", ".", "_obcache", "[", "tuple", "(", "ix", ")", "]", "self", ".", "_obcache_current", "=", "True", "# now prefer local processing:", "self", ".", "__engine_affinity__", "=", "(", "engine", ".", "eid", ",", "self", ".", "__engine_affinity__", "[", "1", "]", ")" ]
49.75
12.125
def new_format(self, navbar: BeautifulSoup, content: BeautifulSoup) -> List[str]:
        """
        Extracts email message information if it uses the new Mailman format

        Args:
            navbar: BeautifulSoup
            content: BeautifulSoup

        Returns:
            List[str]
        """
        sender = content.find(id='from').text.split('via')[0][6:].strip()
        date_str = content.find(id='date').text.split(': ')[1].strip()
        date = parsedate_to_datetime(date_str).isoformat()[:19]
        body = content.find(id='body').text.strip()

        nxt, rep_to = None, None
        links = navbar.findAll('a')
        for l in links:
            if 'Next in thread' in str(l):
                nxt = '/'.join(self.email_url.split('/')[:-1]) + '/' + l['href']
                nxt = nxt[1:] if nxt[0] == '/' else nxt
            elif 'reply to' in str(l):
                rep_to = '/'.join(self.email_url.split('/')[:-1]) + '/' + l['href']
                rep_to = rep_to[1:] if rep_to[0] == '/' else rep_to

        return [str(i) for i in [sender, date, body, nxt, rep_to]]
[ "def", "new_format", "(", "self", ",", "navbar", ":", "BeautifulSoup", ",", "content", ":", "BeautifulSoup", ")", "->", "List", "[", "str", "]", ":", "sender", "=", "content", ".", "find", "(", "id", "=", "'from'", ")", ".", "text", ".", "split", "(", "'via'", ")", "[", "0", "]", "[", "6", ":", "]", ".", "strip", "(", ")", "date_str", "=", "content", ".", "find", "(", "id", "=", "'date'", ")", ".", "text", ".", "split", "(", "': '", ")", "[", "1", "]", ".", "strip", "(", ")", "date", "=", "parsedate_to_datetime", "(", "date_str", ")", ".", "isoformat", "(", ")", "[", ":", "19", "]", "body", "=", "content", ".", "find", "(", "id", "=", "'body'", ")", ".", "text", ".", "strip", "(", ")", "nxt", ",", "rep_to", "=", "None", ",", "None", "links", "=", "navbar", ".", "findAll", "(", "'a'", ")", "for", "l", "in", "links", ":", "if", "'Next in thread'", "in", "str", "(", "l", ")", ":", "nxt", "=", "'/'", ".", "join", "(", "self", ".", "email_url", ".", "split", "(", "'/'", ")", "[", ":", "-", "1", "]", ")", "+", "'/'", "+", "l", "[", "'href'", "]", "nxt", "=", "nxt", "[", "1", ":", "]", "if", "nxt", "[", "0", "]", "==", "'/'", "else", "nxt", "elif", "'reply to'", "in", "str", "(", "l", ")", ":", "rep_to", "=", "'/'", ".", "join", "(", "self", ".", "email_url", ".", "split", "(", "'/'", ")", "[", ":", "-", "1", "]", ")", "+", "'/'", "+", "l", "[", "'href'", "]", "rep_to", "=", "rep_to", "[", "1", ":", "]", "if", "rep_to", "[", "0", "]", "==", "'/'", "else", "rep_to", "return", "[", "str", "(", "i", ")", "for", "i", "in", "[", "sender", ",", "date", ",", "body", ",", "nxt", ",", "rep_to", "]", "]" ]
41.84
22
def migrate_case(adapter: MongoAdapter, scout_case: dict, archive_data: dict):
    """Migrate case information from archive."""
    # update collaborators
    collaborators = list(set(scout_case['collaborators'] + archive_data['collaborators']))
    if collaborators != scout_case['collaborators']:
        LOG.info(f"set collaborators: {', '.join(collaborators)}")
        scout_case['collaborators'] = collaborators

    # update assignees
    if len(scout_case.get('assignees', [])) == 0:
        scout_user = adapter.user(archive_data['assignee'])
        if scout_user:
            scout_case['assignees'] = [archive_data['assignee']]
        else:
            LOG.warning(f"{archive_data['assignee']}: unable to find assigned user")

    # add/update suspected/causative variants
    for key in ['suspects', 'causatives']:
        scout_case[key] = scout_case.get(key, [])
        for archive_variant in archive_data[key]:
            variant_id = get_variantid(archive_variant, scout_case['_id'])
            scout_variant = adapter.variant(variant_id)
            if scout_variant:
                if scout_variant['_id'] in scout_case[key]:
                    LOG.info(f"{scout_variant['_id']}: variant already in {key}")
                else:
                    LOG.info(f"{scout_variant['_id']}: add to {key}")
                    scout_case[key].append(scout_variant['_id'])
            else:
                LOG.warning(f"{variant_id}: unable to find variant ({key})")
                scout_case[key].append(variant_id)

    if not scout_case.get('synopsis'):
        # update synopsis
        scout_case['synopsis'] = archive_data['synopsis']

    scout_case['is_migrated'] = True
    adapter.case_collection.find_one_and_replace(
        {'_id': scout_case['_id']},
        scout_case,
    )

    # add/update phenotype groups/terms
    scout_institute = adapter.institute(scout_case['owner'])
    scout_user = adapter.user('[email protected]')
    for key in ['phenotype_terms', 'phenotype_groups']:
        for archive_term in archive_data[key]:
            adapter.add_phenotype(
                institute=scout_institute,
                case=scout_case,
                user=scout_user,
                link=f"/{scout_case['owner']}/{scout_case['display_name']}",
                hpo_term=archive_term['phenotype_id'],
                is_group=key == 'phenotype_groups',
            )
[ "def", "migrate_case", "(", "adapter", ":", "MongoAdapter", ",", "scout_case", ":", "dict", ",", "archive_data", ":", "dict", ")", ":", "# update collaborators", "collaborators", "=", "list", "(", "set", "(", "scout_case", "[", "'collaborators'", "]", "+", "archive_data", "[", "'collaborators'", "]", ")", ")", "if", "collaborators", "!=", "scout_case", "[", "'collaborators'", "]", ":", "LOG", ".", "info", "(", "f\"set collaborators: {', '.join(collaborators)}\"", ")", "scout_case", "[", "'collaborators'", "]", "=", "collaborators", "# update assignees", "if", "len", "(", "scout_case", ".", "get", "(", "'assignees'", ",", "[", "]", ")", ")", "==", "0", ":", "scout_user", "=", "adapter", ".", "user", "(", "archive_data", "[", "'assignee'", "]", ")", "if", "scout_user", ":", "scout_case", "[", "'assignees'", "]", "=", "[", "archive_data", "[", "'assignee'", "]", "]", "else", ":", "LOG", ".", "warning", "(", "f\"{archive_data['assignee']}: unable to find assigned user\"", ")", "# add/update suspected/causative variants", "for", "key", "in", "[", "'suspects'", ",", "'causatives'", "]", ":", "scout_case", "[", "key", "]", "=", "scout_case", ".", "get", "(", "key", ",", "[", "]", ")", "for", "archive_variant", "in", "archive_data", "[", "key", "]", ":", "variant_id", "=", "get_variantid", "(", "archive_variant", ",", "scout_case", "[", "'_id'", "]", ")", "scout_variant", "=", "adapter", ".", "variant", "(", "variant_id", ")", "if", "scout_variant", ":", "if", "scout_variant", "[", "'_id'", "]", "in", "scout_case", "[", "key", "]", ":", "LOG", ".", "info", "(", "f\"{scout_variant['_id']}: variant already in {key}\"", ")", "else", ":", "LOG", ".", "info", "(", "f\"{scout_variant['_id']}: add to {key}\"", ")", "scout_variant", "[", "key", "]", ".", "append", "(", "scout_variant", "[", "'_id'", "]", ")", "else", ":", "LOG", ".", "warning", "(", "f\"{scout_variant['_id']}: unable to find variant ({key})\"", ")", "scout_variant", "[", "key", "]", ".", "append", "(", "variant_id", ")", "if", "not", "scout_case", ".", "get", "(", "'synopsis'", ")", ":", "# update synopsis", "scout_case", "[", "'synopsis'", "]", "=", "archive_data", "[", "'synopsis'", "]", "scout_case", "[", "'is_migrated'", "]", "=", "True", "adapter", ".", "case_collection", ".", "find_one_and_replace", "(", "{", "'_id'", ":", "scout_case", "[", "'_id'", "]", "}", ",", "scout_case", ",", ")", "# add/update phenotype groups/terms", "scout_institute", "=", "adapter", ".", "institute", "(", "scout_case", "[", "'owner'", "]", ")", "scout_user", "=", "adapter", ".", "user", "(", "'[email protected]'", ")", "for", "key", "in", "[", "'phenotype_terms'", ",", "'phenotype_groups'", "]", ":", "for", "archive_term", "in", "archive_data", "[", "key", "]", ":", "adapter", ".", "add_phenotype", "(", "institute", "=", "scout_institute", ",", "case", "=", "scout_case", ",", "user", "=", "scout_user", ",", "link", "=", "f\"/{scout_case['owner']}/{scout_case['display_name']}\"", ",", "hpo_term", "=", "archive_term", "[", "'phenotype_id'", "]", ",", "is_group", "=", "key", "==", "'phenotype_groups'", ",", ")" ]
43.254545
19.909091
def frame(y, frame_length=2048, hop_length=512):
    '''Slice a time series into overlapping frames.

    This implementation uses low-level stride manipulation to avoid
    redundant copies of the time series data.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        Time series to frame. Must be one-dimensional and contiguous
        in memory.

    frame_length : int > 0 [scalar]
        Length of the frame in samples

    hop_length : int > 0 [scalar]
        Number of samples to hop between frames

    Returns
    -------
    y_frames : np.ndarray [shape=(frame_length, N_FRAMES)]
        An array of frames sampled from `y`:
        `y_frames[i, j] == y[j * hop_length + i]`

    Raises
    ------
    ParameterError
        If `y` is not contiguous in memory, not an `np.ndarray`, or
        not one-dimensional.  See `np.ascontiguousarray()` for details.

        If `hop_length < 1`, frames cannot advance.

        If `len(y) < frame_length`.

    Examples
    --------
    Extract 2048-sample frames from `y` with a hop of 64 samples per frame

    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> librosa.util.frame(y, frame_length=2048, hop_length=64)
    array([[ -9.216e-06,   7.710e-06, ...,  -2.117e-06,  -4.362e-07],
           [  2.518e-06,  -6.294e-06, ...,  -1.775e-05,  -6.365e-06],
           ...,
           [ -7.429e-04,   5.173e-03, ...,   1.105e-05,  -5.074e-06],
           [  2.169e-03,   4.867e-03, ...,   3.666e-06,  -5.571e-06]], dtype=float32)

    '''

    if not isinstance(y, np.ndarray):
        raise ParameterError('Input must be of type numpy.ndarray, '
                             'given type(y)={}'.format(type(y)))

    if y.ndim != 1:
        raise ParameterError('Input must be one-dimensional, '
                             'given y.ndim={}'.format(y.ndim))

    if len(y) < frame_length:
        raise ParameterError('Buffer is too short (n={:d})'
                             ' for frame_length={:d}'.format(len(y), frame_length))

    if hop_length < 1:
        raise ParameterError('Invalid hop_length: {:d}'.format(hop_length))

    if not y.flags['C_CONTIGUOUS']:
        raise ParameterError('Input buffer must be contiguous.')

    # Compute the number of frames that will fit. The end may get truncated.
    n_frames = 1 + int((len(y) - frame_length) / hop_length)

    # Vertical stride is one sample
    # Horizontal stride is `hop_length` samples
    y_frames = as_strided(y, shape=(frame_length, n_frames),
                          strides=(y.itemsize, hop_length * y.itemsize))

    return y_frames
[ "def", "frame", "(", "y", ",", "frame_length", "=", "2048", ",", "hop_length", "=", "512", ")", ":", "if", "not", "isinstance", "(", "y", ",", "np", ".", "ndarray", ")", ":", "raise", "ParameterError", "(", "'Input must be of type numpy.ndarray, '", "'given type(y)={}'", ".", "format", "(", "type", "(", "y", ")", ")", ")", "if", "y", ".", "ndim", "!=", "1", ":", "raise", "ParameterError", "(", "'Input must be one-dimensional, '", "'given y.ndim={}'", ".", "format", "(", "y", ".", "ndim", ")", ")", "if", "len", "(", "y", ")", "<", "frame_length", ":", "raise", "ParameterError", "(", "'Buffer is too short (n={:d})'", "' for frame_length={:d}'", ".", "format", "(", "len", "(", "y", ")", ",", "frame_length", ")", ")", "if", "hop_length", "<", "1", ":", "raise", "ParameterError", "(", "'Invalid hop_length: {:d}'", ".", "format", "(", "hop_length", ")", ")", "if", "not", "y", ".", "flags", "[", "'C_CONTIGUOUS'", "]", ":", "raise", "ParameterError", "(", "'Input buffer must be contiguous.'", ")", "# Compute the number of frames that will fit. The end may get truncated.", "n_frames", "=", "1", "+", "int", "(", "(", "len", "(", "y", ")", "-", "frame_length", ")", "/", "hop_length", ")", "# Vertical stride is one sample", "# Horizontal stride is `hop_length` samples", "y_frames", "=", "as_strided", "(", "y", ",", "shape", "=", "(", "frame_length", ",", "n_frames", ")", ",", "strides", "=", "(", "y", ".", "itemsize", ",", "hop_length", "*", "y", ".", "itemsize", ")", ")", "return", "y_frames" ]
34.121622
25.472973
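The stride trick is worth seeing on a tiny array; each column below is a view into y, not a copy:

import numpy as np
from numpy.lib.stride_tricks import as_strided

y = np.arange(8, dtype=np.float32)
frame_length, hop_length = 4, 2
n_frames = 1 + (len(y) - frame_length) // hop_length   # 3 frames
frames = as_strided(y, shape=(frame_length, n_frames),
                    strides=(y.itemsize, hop_length * y.itemsize))
print(frames)          # frames[i, j] == y[j * hop_length + i]
# [[0. 2. 4.]
#  [1. 3. 5.]
#  [2. 4. 6.]
#  [3. 5. 7.]]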
def confd_state_internal_callpoints_notification_stream_replay_registration_type_file_file(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") internal = ET.SubElement(confd_state, "internal") callpoints = ET.SubElement(internal, "callpoints") notification_stream_replay = ET.SubElement(callpoints, "notification-stream-replay") name_key = ET.SubElement(notification_stream_replay, "name") name_key.text = kwargs.pop('name') registration_type = ET.SubElement(notification_stream_replay, "registration-type") file = ET.SubElement(registration_type, "file") file = ET.SubElement(file, "file") file.text = kwargs.pop('file') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "confd_state_internal_callpoints_notification_stream_replay_registration_type_file_file", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "confd_state", "=", "ET", ".", "SubElement", "(", "config", ",", "\"confd-state\"", ",", "xmlns", "=", "\"http://tail-f.com/yang/confd-monitoring\"", ")", "internal", "=", "ET", ".", "SubElement", "(", "confd_state", ",", "\"internal\"", ")", "callpoints", "=", "ET", ".", "SubElement", "(", "internal", ",", "\"callpoints\"", ")", "notification_stream_replay", "=", "ET", ".", "SubElement", "(", "callpoints", ",", "\"notification-stream-replay\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "notification_stream_replay", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "registration_type", "=", "ET", ".", "SubElement", "(", "notification_stream_replay", ",", "\"registration-type\"", ")", "file", "=", "ET", ".", "SubElement", "(", "registration_type", ",", "\"file\"", ")", "file", "=", "ET", ".", "SubElement", "(", "file", ",", "\"file\"", ")", "file", ".", "text", "=", "kwargs", ".", "pop", "(", "'file'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
54.235294
22.882353
def mget(self, pat=None): """ Get a dictionary mapping of all k:v pairs with key matching glob style expression `pat`. """ if pat is None: return {} expr = re.compile(fnmatch.translate(pat)) m = {} for key in tuple(self.keys()): #make sure the key is a str first if isinstance(key, string_types): if expr.match(key): m[key] = self[key] return m
[ "def", "mget", "(", "self", ",", "pat", "=", "None", ")", ":", "if", "pat", "is", "None", ":", "return", "{", "}", "expr", "=", "re", ".", "compile", "(", "fnmatch", ".", "translate", "(", "pat", ")", ")", "m", "=", "{", "}", "for", "key", "in", "tuple", "(", "self", ".", "keys", "(", ")", ")", ":", "#make sure the key is a str first", "if", "isinstance", "(", "key", ",", "string_types", ")", ":", "if", "expr", ".", "match", "(", "key", ")", ":", "m", "[", "key", "]", "=", "self", "[", "key", "]", "return", "m" ]
31.6
10.666667
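The glob matching reduces to fnmatch.translate plus re; a dict-based sketch with made-up keys:

import fnmatch
import re

store = {'user:1': 'ann', 'user:2': 'bob', 'job:9': 'idle'}

def mget(store, pat=None):
    if pat is None:
        return {}
    expr = re.compile(fnmatch.translate(pat))
    return {k: v for k, v in store.items()
            if isinstance(k, str) and expr.match(k)}

print(mget(store, 'user:*'))  # {'user:1': 'ann', 'user:2': 'bob'}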
def SchemaValidateCtxt(self, reader, options): """Use W3C XSD schema context to validate the document as it is processed. Activation is only possible before the first Read(). If @ctxt is None, then XML Schema validation is deactivated. """ if reader is None: reader__o = None else: reader__o = reader._o ret = libxml2mod.xmlTextReaderSchemaValidateCtxt(reader__o, self._o, options) return ret
[ "def", "SchemaValidateCtxt", "(", "self", ",", "reader", ",", "options", ")", ":", "if", "reader", "is", "None", ":", "reader__o", "=", "None", "else", ":", "reader__o", "=", "reader", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlTextReaderSchemaValidateCtxt", "(", "reader__o", ",", "self", ".", "_o", ",", "options", ")", "return", "ret" ]
50.555556
14.888889