Dataset schema (column, type, min-max length or class count):

nwo                  stringlengths   5 - 106
sha                  stringlengths   40 - 40
path                 stringlengths   4 - 174
language             stringclasses   1 value
identifier           stringlengths   1 - 140
parameters           stringlengths   0 - 87.7k
argument_list        stringclasses   1 value
return_statement     stringlengths   0 - 426k
docstring            stringlengths   0 - 64.3k
docstring_summary    stringlengths   0 - 26.3k
docstring_tokens     list
function             stringlengths   18 - 4.83M
function_tokens      list
url                  stringlengths   83 - 304
marcharper/python-ternary
a95361820073c1ace36f9cf31f3e4650c404920b
ternary/helpers.py
python
unzip
(l)
return list(zip(*l))
[(a1, b1), ..., (an, bn)] ----> ([a1, ..., an], [b1, ..., bn])
[(a1, b1), ..., (an, bn)] ----> ([a1, ..., an], [b1, ..., bn])
[ "[", "(", "a1", "b1", ")", "...", "(", "an", "bn", ")", "]", "----", ">", "(", "[", "a1", "...", "an", "]", "[", "b1", "...", "bn", "]", ")" ]
def unzip(l):
    """[(a1, b1), ..., (an, bn)] ----> ([a1, ..., an], [b1, ..., bn])"""
    return list(zip(*l))
[ "def", "unzip", "(", "l", ")", ":", "return", "list", "(", "zip", "(", "*", "l", ")", ")" ]
https://github.com/marcharper/python-ternary/blob/a95361820073c1ace36f9cf31f3e4650c404920b/ternary/helpers.py#L16-L18
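A quick usage sketch of unzip on a small list of pairs (illustrative values only):

>>> unzip([(1, 'a'), (2, 'b'), (3, 'c')])
[(1, 2, 3), ('a', 'b', 'c')]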
jssimporter/JSSImporter
4f82f4c22aa6be99edfec55acb16d0b52cc03182
JSSImporter.py
python
JSSImporter.add_or_update_static_group
(self, group)
return computer_group
Either add a new group or update existing group.
Either add a new group or update existing group.
[ "Either", "add", "a", "new", "group", "or", "update", "existing", "group", "." ]
def add_or_update_static_group(self, group):
    """Either add a new group or update existing group."""
    # Check for pre-existing group first
    try:
        computer_group = self.jss.ComputerGroup(group["name"])
        self.output(
            "Static Computer Group: {} already exists.".format(computer_group.name)
        )
    except jss.GetError:
        computer_group = jss.ComputerGroup(self.jss, group["name"])
        computer_group.save()
        self.output(
            "Static Computer Group '{}' created.".format(computer_group.name)
        )
        self.env["jss_changed_objects"]["jss_group_added"].append(
            computer_group.name
        )

    return computer_group
[ "def", "add_or_update_static_group", "(", "self", ",", "group", ")", ":", "# Check for pre-existing group first", "try", ":", "computer_group", "=", "self", ".", "jss", ".", "ComputerGroup", "(", "group", "[", "\"name\"", "]", ")", "self", ".", "output", "(", "\"Static Computer Group: {} already exists.\"", ".", "format", "(", "computer_group", ".", "name", ")", ")", "except", "jss", ".", "GetError", ":", "computer_group", "=", "jss", ".", "ComputerGroup", "(", "self", ".", "jss", ",", "group", "[", "\"name\"", "]", ")", "computer_group", ".", "save", "(", ")", "self", ".", "output", "(", "\"Static Computer Group '{}' created.\"", ".", "format", "(", "computer_group", ".", "name", ")", ")", "self", ".", "env", "[", "\"jss_changed_objects\"", "]", "[", "\"jss_group_added\"", "]", ".", "append", "(", "computer_group", ".", "name", ")", "return", "computer_group" ]
https://github.com/jssimporter/JSSImporter/blob/4f82f4c22aa6be99edfec55acb16d0b52cc03182/JSSImporter.py#L1160-L1178
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/vod/v20180717/models.py
python
DomainDetailInfo.__init__
(self)
r""" :param Domain: 域名名称。 :type Domain: str :param AccelerateAreaInfos: 加速地区信息。 注意:此字段可能返回 null,表示取不到有效值。 :type AccelerateAreaInfos: list of AccelerateAreaInfo :param DeployStatus: 部署状态,取值有: <li>Online:上线;</li> <li>Deploying:部署中;</li> <li>Locked: 锁定中,出现该状态时,无法对该域名进行部署变更。</li> :type DeployStatus: str :param HTTPSConfig: HTTPS 配置信息。 注意:此字段可能返回 null,表示取不到有效值。 :type HTTPSConfig: :class:`tencentcloud.vod.v20180717.models.DomainHTTPSConfig` :param UrlSignatureAuthPolicy: [Key 防盗链](https://cloud.tencent.com/document/product/266/14047)配置信息。 注意:此字段可能返回 null,表示取不到有效值。 :type UrlSignatureAuthPolicy: :class:`tencentcloud.vod.v20180717.models.UrlSignatureAuthPolicy` :param RefererAuthPolicy: [Referer 防盗链](https://cloud.tencent.com/document/product/266/14046)配置信息。 注意:此字段可能返回 null,表示取不到有效值。 :type RefererAuthPolicy: :class:`tencentcloud.vod.v20180717.models.RefererAuthPolicy` :param CreateTime: 域名添加到腾讯云点播系统中的时间。 <li>格式按照 ISO 8601标准表示,详见 [ISO 日期格式说明](https://cloud.tencent.com/document/product/266/11732#iso-.E6.97.A5.E6.9C.9F.E6.A0.BC.E5.BC.8F)。</li> :type CreateTime: str
r""" :param Domain: 域名名称。 :type Domain: str :param AccelerateAreaInfos: 加速地区信息。 注意:此字段可能返回 null,表示取不到有效值。 :type AccelerateAreaInfos: list of AccelerateAreaInfo :param DeployStatus: 部署状态,取值有: <li>Online:上线;</li> <li>Deploying:部署中;</li> <li>Locked: 锁定中,出现该状态时,无法对该域名进行部署变更。</li> :type DeployStatus: str :param HTTPSConfig: HTTPS 配置信息。 注意:此字段可能返回 null,表示取不到有效值。 :type HTTPSConfig: :class:`tencentcloud.vod.v20180717.models.DomainHTTPSConfig` :param UrlSignatureAuthPolicy: [Key 防盗链](https://cloud.tencent.com/document/product/266/14047)配置信息。 注意:此字段可能返回 null,表示取不到有效值。 :type UrlSignatureAuthPolicy: :class:`tencentcloud.vod.v20180717.models.UrlSignatureAuthPolicy` :param RefererAuthPolicy: [Referer 防盗链](https://cloud.tencent.com/document/product/266/14046)配置信息。 注意:此字段可能返回 null,表示取不到有效值。 :type RefererAuthPolicy: :class:`tencentcloud.vod.v20180717.models.RefererAuthPolicy` :param CreateTime: 域名添加到腾讯云点播系统中的时间。 <li>格式按照 ISO 8601标准表示,详见 [ISO 日期格式说明](https://cloud.tencent.com/document/product/266/11732#iso-.E6.97.A5.E6.9C.9F.E6.A0.BC.E5.BC.8F)。</li> :type CreateTime: str
[ "r", ":", "param", "Domain", ":", "域名名称。", ":", "type", "Domain", ":", "str", ":", "param", "AccelerateAreaInfos", ":", "加速地区信息。", "注意:此字段可能返回", "null,表示取不到有效值。", ":", "type", "AccelerateAreaInfos", ":", "list", "of", "AccelerateAreaInfo", ":", "param", "DeployStatus", ":", "部署状态,取值有:", "<li", ">", "Online:上线;<", "/", "li", ">", "<li", ">", "Deploying:部署中;<", "/", "li", ">", "<li", ">", "Locked", ":", "锁定中,出现该状态时,无法对该域名进行部署变更。<", "/", "li", ">", ":", "type", "DeployStatus", ":", "str", ":", "param", "HTTPSConfig", ":", "HTTPS", "配置信息。", "注意:此字段可能返回", "null,表示取不到有效值。", ":", "type", "HTTPSConfig", ":", ":", "class", ":", "tencentcloud", ".", "vod", ".", "v20180717", ".", "models", ".", "DomainHTTPSConfig", ":", "param", "UrlSignatureAuthPolicy", ":", "[", "Key", "防盗链", "]", "(", "https", ":", "//", "cloud", ".", "tencent", ".", "com", "/", "document", "/", "product", "/", "266", "/", "14047", ")", "配置信息。", "注意:此字段可能返回", "null,表示取不到有效值。", ":", "type", "UrlSignatureAuthPolicy", ":", ":", "class", ":", "tencentcloud", ".", "vod", ".", "v20180717", ".", "models", ".", "UrlSignatureAuthPolicy", ":", "param", "RefererAuthPolicy", ":", "[", "Referer", "防盗链", "]", "(", "https", ":", "//", "cloud", ".", "tencent", ".", "com", "/", "document", "/", "product", "/", "266", "/", "14046", ")", "配置信息。", "注意:此字段可能返回", "null,表示取不到有效值。", ":", "type", "RefererAuthPolicy", ":", ":", "class", ":", "tencentcloud", ".", "vod", ".", "v20180717", ".", "models", ".", "RefererAuthPolicy", ":", "param", "CreateTime", ":", "域名添加到腾讯云点播系统中的时间。", "<li", ">", "格式按照", "ISO", "8601标准表示,详见", "[", "ISO", "日期格式说明", "]", "(", "https", ":", "//", "cloud", ".", "tencent", ".", "com", "/", "document", "/", "product", "/", "266", "/", "11732#iso", "-", ".", "E6", ".", "97", ".", "A5", ".", "E6", ".", "9C", ".", "9F", ".", "E6", ".", "A0", ".", "BC", ".", "E5", ".", "BC", ".", "8F", ")", "。<", "/", "li", ">", ":", "type", "CreateTime", ":", "str" ]
def __init__(self):
    r"""
    :param Domain: Domain name.
    :type Domain: str
    :param AccelerateAreaInfos: Acceleration region information.
        Note: this field may return null, indicating that no valid value can be obtained.
    :type AccelerateAreaInfos: list of AccelerateAreaInfo
    :param DeployStatus: Deployment status. Valid values:
        <li>Online: online;</li>
        <li>Deploying: deploying;</li>
        <li>Locked: locked; while in this state, no deployment changes can be made to the domain name.</li>
    :type DeployStatus: str
    :param HTTPSConfig: HTTPS configuration information.
        Note: this field may return null, indicating that no valid value can be obtained.
    :type HTTPSConfig: :class:`tencentcloud.vod.v20180717.models.DomainHTTPSConfig`
    :param UrlSignatureAuthPolicy: [Key hotlink protection](https://cloud.tencent.com/document/product/266/14047) configuration information.
        Note: this field may return null, indicating that no valid value can be obtained.
    :type UrlSignatureAuthPolicy: :class:`tencentcloud.vod.v20180717.models.UrlSignatureAuthPolicy`
    :param RefererAuthPolicy: [Referer hotlink protection](https://cloud.tencent.com/document/product/266/14046) configuration information.
        Note: this field may return null, indicating that no valid value can be obtained.
    :type RefererAuthPolicy: :class:`tencentcloud.vod.v20180717.models.RefererAuthPolicy`
    :param CreateTime: Time when the domain name was added to the Tencent Cloud VOD system.
        <li>The format follows the ISO 8601 standard; see [ISO date format notes](https://cloud.tencent.com/document/product/266/11732#iso-.E6.97.A5.E6.9C.9F.E6.A0.BC.E5.BC.8F).</li>
    :type CreateTime: str
    """
    self.Domain = None
    self.AccelerateAreaInfos = None
    self.DeployStatus = None
    self.HTTPSConfig = None
    self.UrlSignatureAuthPolicy = None
    self.RefererAuthPolicy = None
    self.CreateTime = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "Domain", "=", "None", "self", ".", "AccelerateAreaInfos", "=", "None", "self", ".", "DeployStatus", "=", "None", "self", ".", "HTTPSConfig", "=", "None", "self", ".", "UrlSignatureAuthPolicy", "=", "None", "self", ".", "RefererAuthPolicy", "=", "None", "self", ".", "CreateTime", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/vod/v20180717/models.py#L10855-L10886
Teradata/stacki
a8085dce179dbe903f65f136f4b63bcc076cc057
common/src/stack/command/stack/argument_processors/firmware.py
python
FirmwareArgProcessor.ensure_unique_models
(self, make, models)
Ensures that none of the given model names already exist in the database for the given make. If an empty iterable is provided, a CommandError is raised. If any model name is blank, a CommandError is raised.
Ensures that none of the given model names already exist in the database for the given make.
[ "Ensures", "that", "none", "of", "the", "given", "model", "names", "already", "exist", "in", "the", "database", "for", "the", "given", "make", "." ]
def ensure_unique_models(self, make, models):
    """Ensures that none of the given model names already exist in the database for the given make.

    If an empty iterable is provided, a CommandError is raised.
    If any model name is blank, a CommandError is raised.
    """
    # We don't require that the make exists because a set of models for a non-existent make would be
    # entirely new in that they are going to be added under a make that doesn't already exist.
    models = tuple(models)
    if not models:
        raise CommandError(cmd = self, msg = "Models are required.")

    # No empty strings allowed.
    if not all(models):
        raise CommandError(cmd = self, msg = "A model cannot be an empty string.")

    # ensure the model name doesn't already exist for the given make
    existing_makes_models = [
        (make, model) for make, model, exists in (
            (make, model, self.model_exists(make, model)) for model in models
        )
        if exists
    ]
    if existing_makes_models:
        raise CommandError(cmd = self, msg = f"The following make and model combinations already exist {existing_makes_models}.")
[ "def", "ensure_unique_models", "(", "self", ",", "make", ",", "models", ")", ":", "# We don't require that the make exists because a set of models for a non-existent make would be", "# entirely new in that they are going to be added under a make that doesn't already exist.", "models", "=", "tuple", "(", "models", ")", "if", "not", "models", ":", "raise", "CommandError", "(", "cmd", "=", "self", ",", "msg", "=", "\"Models are required.\"", ")", "# No empty strings allowed.", "if", "not", "all", "(", "models", ")", ":", "raise", "CommandError", "(", "cmd", "=", "self", ",", "msg", "=", "\"A model cannot be an empty string.\"", ")", "# ensure the model name doesn't already exist for the given make", "existing_makes_models", "=", "[", "(", "make", ",", "model", ")", "for", "make", ",", "model", ",", "exists", "in", "(", "(", "make", ",", "model", ",", "self", ".", "model_exists", "(", "make", ",", "model", ")", ")", "for", "model", "in", "models", ")", "if", "exists", "]", "if", "existing_makes_models", ":", "raise", "CommandError", "(", "cmd", "=", "self", ",", "msg", "=", "f\"The following make and model combinations already exist {existing_makes_models}.\"", ")" ]
https://github.com/Teradata/stacki/blob/a8085dce179dbe903f65f136f4b63bcc076cc057/common/src/stack/command/stack/argument_processors/firmware.py#L136-L162
PlasmaPy/PlasmaPy
78d63e341216475ce3318e1409296480407c9019
plasmapy/particles/ionization_state.py
python
IonizationState.ionic_symbols
(self)
return self.to_list().symbols
The ionic symbols for all charge states.
The ionic symbols for all charge states.
[ "The", "ionic", "symbols", "for", "all", "charge", "states", "." ]
def ionic_symbols(self) -> List[str]:
    """The ionic symbols for all charge states."""
    return self.to_list().symbols
[ "def", "ionic_symbols", "(", "self", ")", "->", "List", "[", "str", "]", ":", "return", "self", ".", "to_list", "(", ")", ".", "symbols" ]
https://github.com/PlasmaPy/PlasmaPy/blob/78d63e341216475ce3318e1409296480407c9019/plasmapy/particles/ionization_state.py#L730-L732
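A short usage sketch of this property; the constructor arguments are an assumption based on PlasmaPy's documented IonizationState interface, and the exact symbol strings may differ by version:

from plasmapy.particles import IonizationState

# Helium with assumed ionic fractions for charge states 0, 1+, and 2+.
state = IonizationState("He-4", [0.2, 0.3, 0.5])
print(state.ionic_symbols)  # e.g. ['He-4 0+', 'He-4 1+', 'He-4 2+']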
benedekrozemberczki/GEMSEC
c023122bdafe88278cdbd24b7fcf9dafe8e95b34
src/calculation_helper.py
python
alias_setup
(probs)
return J, q
Compute utility lists for non-uniform sampling from discrete distributions. Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ for details
Compute utility lists for non-uniform sampling from discrete distributions. Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ for details
[ "Compute", "utility", "lists", "for", "non", "-", "uniform", "sampling", "from", "discrete", "distributions", ".", "Refer", "to", "https", ":", "//", "hips", ".", "seas", ".", "harvard", ".", "edu", "/", "blog", "/", "2013", "/", "03", "/", "03", "/", "the", "-", "alias", "-", "method", "-", "efficient", "-", "sampling", "-", "with", "-", "many", "-", "discrete", "-", "outcomes", "/", "for", "details" ]
def alias_setup(probs):
    """
    Compute utility lists for non-uniform sampling from discrete distributions.
    Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    for details
    """
    K = len(probs)
    q = np.zeros(K)
    J = np.zeros(K, dtype=np.int)

    smaller = []
    larger = []
    for kk, prob in enumerate(probs):
        q[kk] = K*prob
        if q[kk] < 1.0:
            smaller.append(kk)
        else:
            larger.append(kk)

    while len(smaller) > 0 and len(larger) > 0:
        small = smaller.pop()
        large = larger.pop()

        J[small] = large
        q[large] = q[large] + q[small] - 1.0
        if q[large] < 1.0:
            smaller.append(large)
        else:
            larger.append(large)

    return J, q
[ "def", "alias_setup", "(", "probs", ")", ":", "K", "=", "len", "(", "probs", ")", "q", "=", "np", ".", "zeros", "(", "K", ")", "J", "=", "np", ".", "zeros", "(", "K", ",", "dtype", "=", "np", ".", "int", ")", "smaller", "=", "[", "]", "larger", "=", "[", "]", "for", "kk", ",", "prob", "in", "enumerate", "(", "probs", ")", ":", "q", "[", "kk", "]", "=", "K", "*", "prob", "if", "q", "[", "kk", "]", "<", "1.0", ":", "smaller", ".", "append", "(", "kk", ")", "else", ":", "larger", ".", "append", "(", "kk", ")", "while", "len", "(", "smaller", ")", ">", "0", "and", "len", "(", "larger", ")", ">", "0", ":", "small", "=", "smaller", ".", "pop", "(", ")", "large", "=", "larger", ".", "pop", "(", ")", "J", "[", "small", "]", "=", "large", "q", "[", "large", "]", "=", "q", "[", "large", "]", "+", "q", "[", "small", "]", "-", "1.0", "if", "q", "[", "large", "]", "<", "1.0", ":", "smaller", ".", "append", "(", "large", ")", "else", ":", "larger", ".", "append", "(", "large", ")", "return", "J", ",", "q" ]
https://github.com/benedekrozemberczki/GEMSEC/blob/c023122bdafe88278cdbd24b7fcf9dafe8e95b34/src/calculation_helper.py#L300-L330
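alias_setup only builds the probability and alias tables; sampling needs a companion draw step. A minimal sketch of that step, following the alias-method blog post linked in the docstring (the function name alias_draw and this pairing are assumptions here, not taken from this row):

import numpy as np

def alias_draw(J, q):
    # Pick a column uniformly at random.
    K = len(J)
    kk = int(np.floor(np.random.rand() * K))
    # Accept the column with probability q[kk], otherwise take its alias.
    if np.random.rand() < q[kk]:
        return kk
    return J[kk]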
mlflow/mlflow
364aca7daf0fcee3ec407ae0b1b16d9cb3085081
mlflow/store/tracking/dbmodels/models.py
python
SqlTag.to_mlflow_entity
(self)
return RunTag(key=self.key, value=self.value)
Convert DB model to corresponding MLflow entity.

:return: :py:class:`mlflow.entities.RunTag`.
Convert DB model to corresponding MLflow entity.
[ "Convert", "DB", "model", "to", "corresponding", "MLflow", "entity", "." ]
def to_mlflow_entity(self):
    """
    Convert DB model to corresponding MLflow entity.

    :return: :py:class:`mlflow.entities.RunTag`.
    """
    return RunTag(key=self.key, value=self.value)
[ "def", "to_mlflow_entity", "(", "self", ")", ":", "return", "RunTag", "(", "key", "=", "self", ".", "key", ",", "value", "=", "self", ".", "value", ")" ]
https://github.com/mlflow/mlflow/blob/364aca7daf0fcee3ec407ae0b1b16d9cb3085081/mlflow/store/tracking/dbmodels/models.py#L282-L288
BlackLight/platypush
a6b552504e2ac327c94f3a28b607061b6b60cf36
platypush/plugins/music/spotify/__init__.py
python
MusicSpotifyPlugin.get_albums
(self, limit: int = 50, offset: int = 0)
return SpotifyAlbumSchema().dump( self._spotify_paginate_results( '/v1/me/albums', limit=limit, offset=offset, ), many=True )
Get the list of albums saved by the user.

:param limit: Maximum number of results (default: 50).
:param offset: Return results starting from this index (default: 0).
:return: .. schema:: spotify.SpotifyAlbumSchema(many=True)
Get the list of albums saved by the user.
[ "Get", "the", "list", "of", "albums", "saved", "by", "the", "user", "." ]
def get_albums(self, limit: int = 50, offset: int = 0) -> List[dict]:
    """
    Get the list of albums saved by the user.

    :param limit: Maximum number of results (default: 50).
    :param offset: Return results starting from this index (default: 0).
    :return: .. schema:: spotify.SpotifyAlbumSchema(many=True)
    """
    return SpotifyAlbumSchema().dump(
        self._spotify_paginate_results(
            '/v1/me/albums',
            limit=limit,
            offset=offset,
        ),
        many=True
    )
[ "def", "get_albums", "(", "self", ",", "limit", ":", "int", "=", "50", ",", "offset", ":", "int", "=", "0", ")", "->", "List", "[", "dict", "]", ":", "return", "SpotifyAlbumSchema", "(", ")", ".", "dump", "(", "self", ".", "_spotify_paginate_results", "(", "'/v1/me/albums'", ",", "limit", "=", "limit", ",", "offset", "=", "offset", ",", ")", ",", "many", "=", "True", ")" ]
https://github.com/BlackLight/platypush/blob/a6b552504e2ac327c94f3a28b607061b6b60cf36/platypush/plugins/music/spotify/__init__.py#L801-L815
ITCoders/Human-detection-and-Tracking
0f132b9d809be8f84859a21fd6c310026089db52
create_face_model.py
python
get_images_and_labels
(path)
return images, labels
convert images to matrices
assign label to every image according to person
Using test data to make the machine learn this data
Args:
    path: path to images directory
Returns:
    matrix of images, labels
convert images to matrices assign label to every image according to person Using test data to make the machine learn this data Args: path: path to images directory
[ "convert", "images", "to", "matrices", "assign", "label", "to", "every", "image", "according", "to", "person", "Using", "test", "data", "to", "make", "the", "machine", "learn", "this", "data", "Args", ":", "path", ":", "path", "to", "images", "directory" ]
def get_images_and_labels(path):
    """
    convert images to matrices
    assign label to every image according to person
    Using test data to make the machine learn this data
    Args:
        path: path to images directory
    Returns:
        matrix of images, labels
    """
    i = 0
    image_paths = [os.path.join(path, f) for f in os.listdir(path) if not f.endswith('.sad')]
    images = []
    labels = []
    for image_path in image_paths:
        image_pil = Image.open(image_path).convert('L')
        image = np.array(image_pil, 'uint8')
        image = imutils.resize(image, width=min(500, image.shape[1]))
        nbr = int(os.path.split(image_path)[1].split(
            ".")[0].replace("subject", ""))
        faces = faceCascade.detectMultiScale(image)
        for (x, y, w, h) in faces:
            images.append(image[y: y + h, x: x + w])
            # cv2.imwrite("subject02."+str(i)+".jpg",image[y: y + h, x: x + w])
            # i=i+1
            labels.append(nbr)
            cv2.imshow("Adding faces to traning set", image[y: y + h, x: x + w])
            cv2.imshow('win', image[y: y + h, x: x + w])
            cv2.waitKey(50)
    return images, labels
[ "def", "get_images_and_labels", "(", "path", ")", ":", "i", "=", "0", "image_paths", "=", "[", "os", ".", "path", ".", "join", "(", "path", ",", "f", ")", "for", "f", "in", "os", ".", "listdir", "(", "path", ")", "if", "not", "f", ".", "endswith", "(", "'.sad'", ")", "]", "images", "=", "[", "]", "labels", "=", "[", "]", "for", "image_path", "in", "image_paths", ":", "image_pil", "=", "Image", ".", "open", "(", "image_path", ")", ".", "convert", "(", "'L'", ")", "image", "=", "np", ".", "array", "(", "image_pil", ",", "'uint8'", ")", "image", "=", "imutils", ".", "resize", "(", "image", ",", "width", "=", "min", "(", "500", ",", "image", ".", "shape", "[", "1", "]", ")", ")", "nbr", "=", "int", "(", "os", ".", "path", ".", "split", "(", "image_path", ")", "[", "1", "]", ".", "split", "(", "\".\"", ")", "[", "0", "]", ".", "replace", "(", "\"subject\"", ",", "\"\"", ")", ")", "faces", "=", "faceCascade", ".", "detectMultiScale", "(", "image", ")", "for", "(", "x", ",", "y", ",", "w", ",", "h", ")", "in", "faces", ":", "images", ".", "append", "(", "image", "[", "y", ":", "y", "+", "h", ",", "x", ":", "x", "+", "w", "]", ")", "# cv2.imwrite(\"subject02.\"+str(i)+\".jpg\",image[y: y + h, x: x + w])", "# i=i+1", "labels", ".", "append", "(", "nbr", ")", "cv2", ".", "imshow", "(", "\"Adding faces to traning set\"", ",", "image", "[", "y", ":", "y", "+", "h", ",", "x", ":", "x", "+", "w", "]", ")", "cv2", ".", "imshow", "(", "'win'", ",", "image", "[", "y", ":", "y", "+", "h", ",", "x", ":", "x", "+", "w", "]", ")", "cv2", ".", "waitKey", "(", "50", ")", "return", "images", ",", "labels" ]
https://github.com/ITCoders/Human-detection-and-Tracking/blob/0f132b9d809be8f84859a21fd6c310026089db52/create_face_model.py#L15-L47
nucleic/enaml
65c2a2a2d765e88f2e1103046680571894bb41ed
enaml/qt/qt_image_view.py
python
QImageView.allowUpscaling
(self)
return self._allow_upscaling
Returns whether or not the image can be scaled greater than its natural size.
Returns whether or not the image can be scaled greater than its natural size.
[ "Returns", "whether", "or", "not", "the", "image", "can", "be", "scaled", "greater", "than", "its", "natural", "size", "." ]
def allowUpscaling(self):
    """ Returns whether or not the image can be scaled greater than
    its natural size.

    """
    return self._allow_upscaling
[ "def", "allowUpscaling", "(", "self", ")", ":", "return", "self", ".", "_allow_upscaling" ]
https://github.com/nucleic/enaml/blob/65c2a2a2d765e88f2e1103046680571894bb41ed/enaml/qt/qt_image_view.py#L160-L165
skylander86/lambda-text-extractor
6da52d077a2fc571e38bfe29c33ae68f6443cd5a
lib-linux_x64/requests/adapters.py
python
HTTPAdapter.init_poolmanager
(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs)
Initializes a urllib3 PoolManager.

This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
Initializes a urllib3 PoolManager.
[ "Initializes", "a", "urllib3", "PoolManager", "." ]
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
    """Initializes a urllib3 PoolManager.

    This method should not be called from user code, and is only
    exposed for use when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param connections: The number of urllib3 connection pools to cache.
    :param maxsize: The maximum number of connections to save in the pool.
    :param block: Block when no free connections are available.
    :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
    """
    # save these values for pickling
    self._pool_connections = connections
    self._pool_maxsize = maxsize
    self._pool_block = block

    self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                   block=block, strict=True, **pool_kwargs)
[ "def", "init_poolmanager", "(", "self", ",", "connections", ",", "maxsize", ",", "block", "=", "DEFAULT_POOLBLOCK", ",", "*", "*", "pool_kwargs", ")", ":", "# save these values for pickling", "self", ".", "_pool_connections", "=", "connections", "self", ".", "_pool_maxsize", "=", "maxsize", "self", ".", "_pool_block", "=", "block", "self", ".", "poolmanager", "=", "PoolManager", "(", "num_pools", "=", "connections", ",", "maxsize", "=", "maxsize", ",", "block", "=", "block", ",", "strict", "=", "True", ",", "*", "*", "pool_kwargs", ")" ]
https://github.com/skylander86/lambda-text-extractor/blob/6da52d077a2fc571e38bfe29c33ae68f6443cd5a/lib-linux_x64/requests/adapters.py#L144-L162
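In normal use these pool parameters reach init_poolmanager through the HTTPAdapter constructor rather than a direct call; a small sketch with the public requests API:

import requests
from requests.adapters import HTTPAdapter

session = requests.Session()
# pool_connections / pool_maxsize / pool_block are forwarded to init_poolmanager.
adapter = HTTPAdapter(pool_connections=10, pool_maxsize=20, pool_block=True)
session.mount("https://", adapter)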
ljvmiranda921/seagull
26828319066b53301170345ba28ae27cb795d1fd
seagull/board.py
python
Board.add
(self, lifeform: Lifeform, loc: Tuple[int, int])
Add a lifeform to the board

Parameters
----------
lifeform: :obj:`seagull.lifeforms.base.Lifeform`
    A lifeform that can evolve in the board
loc : array_like of size 2
    Initial location of the lifeform on the board
Add a lifeform to the board
[ "Add", "a", "lifeform", "to", "the", "board" ]
def add(self, lifeform: Lifeform, loc: Tuple[int, int]):
    """Add a lifeform to the board

    Parameters
    ----------
    lifeform: :obj:`seagull.lifeforms.base.Lifeform`
        A lifeform that can evolve in the board
    loc : array_like of size 2
        Initial location of the lifeform on the board
    """
    try:
        row, col = loc
        height, width = lifeform.size
        self.state[row : row + height, col : col + width] = lifeform.layout
    except ValueError:
        logger.error("Lifeform is out-of-bounds!")
        raise
[ "def", "add", "(", "self", ",", "lifeform", ":", "Lifeform", ",", "loc", ":", "Tuple", "[", "int", ",", "int", "]", ")", ":", "try", ":", "row", ",", "col", "=", "loc", "height", ",", "width", "=", "lifeform", ".", "size", "self", ".", "state", "[", "row", ":", "row", "+", "height", ",", "col", ":", "col", "+", "width", "]", "=", "lifeform", ".", "layout", "except", "ValueError", ":", "logger", ".", "error", "(", "\"Lifeform is out-of-bounds!\"", ")", "raise" ]
https://github.com/ljvmiranda921/seagull/blob/26828319066b53301170345ba28ae27cb795d1fd/seagull/board.py#L59-L75
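A short usage sketch of Board.add; the Glider lifeform and board size shown here are illustrative and assume seagull's documented top-level API:

import seagull as sg
from seagull import lifeforms as lf

board = sg.Board(size=(20, 20))
# Place a 3x3 glider near the top-left corner of the grid.
board.add(lf.Glider(), loc=(1, 1))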
HIPS/Spearmint
990d27d4477bbc9b0d5cfb2c950fc387decf6ea2
spearmint/resources/resource.py
python
Resource.acceptingJobs
(self, jobs)
return True
Is this resource currently accepting new jobs?
Is this resource currently accepting new jobs?
[ "Is", "this", "resource", "currently", "accepting", "new", "jobs?" ]
def acceptingJobs(self, jobs):
    """Is this resource currently accepting new jobs?"""
    if self.numPending(jobs) >= self.max_concurrent:
        return False

    if self.numComplete(jobs) >= self.max_finished_jobs:
        return False

    return True
[ "def", "acceptingJobs", "(", "self", ",", "jobs", ")", ":", "if", "self", ".", "numPending", "(", "jobs", ")", ">=", "self", ".", "max_concurrent", ":", "return", "False", "if", "self", ".", "numComplete", "(", "jobs", ")", ">=", "self", ".", "max_finished_jobs", ":", "return", "False", "return", "True" ]
https://github.com/HIPS/Spearmint/blob/990d27d4477bbc9b0d5cfb2c950fc387decf6ea2/spearmint/resources/resource.py#L320-L328
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/venv/lib/python3.7/site-packages/pip/_internal/vcs/__init__.py
python
VersionControl.switch
(self, dest, url, rev_options)
Switch the repo at ``dest`` to point to ``URL``.

Args:
  rev_options: a RevOptions object.
Switch the repo at ``dest`` to point to ``URL``.
[ "Switch", "the", "repo", "at", "dest", "to", "point", "to", "URL", "." ]
def switch(self, dest, url, rev_options):
    """
    Switch the repo at ``dest`` to point to ``URL``.

    Args:
      rev_options: a RevOptions object.
    """
    raise NotImplementedError
[ "def", "switch", "(", "self", ",", "dest", ",", "url", ",", "rev_options", ")", ":", "raise", "NotImplementedError" ]
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/venv/lib/python3.7/site-packages/pip/_internal/vcs/__init__.py#L396-L403
tlsfuzzer/tlslite-ng
8720db53067ba4f7bb7b5a32d682033d8b5446f9
tlslite/extensions.py
python
ALPNExtension.__repr__
(self)
return "ALPNExtension(protocol_names={0!r})"\ .format(self.protocol_names)
Create programmer-readable representation of object

:rtype: str
Create programmer-readable representation of object
[ "Create", "programmer", "-", "readable", "representation", "of", "object" ]
def __repr__(self):
    """
    Create programmer-readable representation of object

    :rtype: str
    """
    return "ALPNExtension(protocol_names={0!r})"\
            .format(self.protocol_names)
[ "def", "__repr__", "(", "self", ")", ":", "return", "\"ALPNExtension(protocol_names={0!r})\"", ".", "format", "(", "self", ".", "protocol_names", ")" ]
https://github.com/tlsfuzzer/tlslite-ng/blob/8720db53067ba4f7bb7b5a32d682033d8b5446f9/tlslite/extensions.py#L1544-L1551
OkunaOrg/okuna-api
f87d8e80d2f182c01dbce68155ded0078ee707e4
openbook_communities/queries.py
python
make_search_communities_query_for_user
(query, user, excluded_from_profile_posts=True)
return search_communities_query
:param query:
:param user:
:param excluded_from_profile_posts: Whether to include the communities that were excluded from profile posts
:return:
[ ":", "param", "query", ":", ":", "param", "user", ":", ":", "param", "excluded_from_profile_posts", ":", "Whether", "to", "include", "the", "communities", "that", "were", "excluded", "from", "profile", "posts", ":", "return", ":" ]
def make_search_communities_query_for_user(query, user, excluded_from_profile_posts=True):
    """
    :param query:
    :param user:
    :param excluded_from_profile_posts: Whether to include the communities that were excluded from profile posts
    :return:
    """
    search_communities_query = make_search_communities_query(query=query)

    if not excluded_from_profile_posts:
        search_communities_query.add(make_exclude_excluded_communities_from_profile_posts_query_for_user(user=user),
                                     Q.AND)

    return search_communities_query
[ "def", "make_search_communities_query_for_user", "(", "query", ",", "user", ",", "excluded_from_profile_posts", "=", "True", ")", ":", "search_communities_query", "=", "make_search_communities_query", "(", "query", "=", "query", ")", "if", "not", "excluded_from_profile_posts", ":", "search_communities_query", ".", "add", "(", "make_exclude_excluded_communities_from_profile_posts_query_for_user", "(", "user", "=", "user", ")", ",", "Q", ".", "AND", ")", "return", "search_communities_query" ]
https://github.com/OkunaOrg/okuna-api/blob/f87d8e80d2f182c01dbce68155ded0078ee707e4/openbook_communities/queries.py#L27-L40
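The search_communities_query.add(..., Q.AND) call above uses the mutating add() that Django's Q objects inherit from django.utils.tree.Node; a minimal sketch of the same pattern (the model field names are hypothetical):

from django.db.models import Q

query = Q(name__icontains="python")
# Equivalent to: query = query & Q(is_private=False)
query.add(Q(is_private=False), Q.AND)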
blurstudio/cross3d
277968d1227de740fc87ef61005c75034420eadf
cross3d/abstract/abstractuserprops.py
python
AbstractUserProps.unescapeValue
(string)
return string
Replaces any html codes with their associated unstorable characters
Replaces any html codes with their associated unstorable characters
[ "Replaces", "any", "html", "codes", "with", "their", "associated", "unstorable", "characters" ]
def unescapeValue(string):
    """ Replaces any html codes with their associated unstorable characters
    """
    string = unicode(string)
    try:
        return json.loads( string )
    except ValueError:
        pass
    string, typ = AbstractUserProps._decodeString(string)
    if typ == float:
        return float(string)
    elif typ == int:
        return int(string)
    elif typ in (list, dict, tuple, bool, OrderedDict):
        return eval(string)
    return string
[ "def", "unescapeValue", "(", "string", ")", ":", "string", "=", "unicode", "(", "string", ")", "try", ":", "return", "json", ".", "loads", "(", "string", ")", "except", "ValueError", ":", "pass", "string", ",", "typ", "=", "AbstractUserProps", ".", "_decodeString", "(", "string", ")", "if", "typ", "==", "float", ":", "return", "float", "(", "string", ")", "elif", "typ", "==", "int", ":", "return", "int", "(", "string", ")", "elif", "typ", "in", "(", "list", ",", "dict", ",", "tuple", ",", "bool", ",", "OrderedDict", ")", ":", "return", "eval", "(", "string", ")", "return", "string" ]
https://github.com/blurstudio/cross3d/blob/277968d1227de740fc87ef61005c75034420eadf/cross3d/abstract/abstractuserprops.py#L199-L216
uber/petastorm
3579e68b86d17b57339efd0da1e8a56033d121d1
petastorm/codecs.py
python
CompressedImageCodec.__init__
(self, image_codec='png', quality=80)
CompressedImageCodec would compress/encompress images.

:param image_codec: any format string supported by opencv. e.g. ``png``, ``jpeg``
:param quality: used when using ``jpeg`` lossy compression
CompressedImageCodec would compress/encompress images.
[ "CompressedImageCodec", "would", "compress", "/", "encompress", "images", "." ]
def __init__(self, image_codec='png', quality=80):
    """CompressedImageCodec would compress/encompress images.

    :param image_codec: any format string supported by opencv. e.g. ``png``, ``jpeg``
    :param quality: used when using ``jpeg`` lossy compression
    """
    assert OPENCV_AVAILABLE, "{} requires opencv-python to be installed".format(type(self).__name__)
    self._image_codec = '.' + image_codec
    self._quality = quality
[ "def", "__init__", "(", "self", ",", "image_codec", "=", "'png'", ",", "quality", "=", "80", ")", ":", "assert", "OPENCV_AVAILABLE", ",", "\"{} requires opencv-python to be installed\"", ".", "format", "(", "type", "(", "self", ")", ".", "__name__", ")", "self", ".", "_image_codec", "=", "'.'", "+", "image_codec", "self", ".", "_quality", "=", "quality" ]
https://github.com/uber/petastorm/blob/3579e68b86d17b57339efd0da1e8a56033d121d1/petastorm/codecs.py#L59-L68
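Codecs like this one are attached to dataset fields; a sketch based on petastorm's documented Unischema usage (the field name and image shape are illustrative):

import numpy as np
from petastorm.codecs import CompressedImageCodec
from petastorm.unischema import Unischema, UnischemaField

ImageSchema = Unischema('ImageSchema', [
    # Store each 128x256 RGB frame PNG-compressed inside the Parquet file.
    UnischemaField('image', np.uint8, (128, 256, 3), CompressedImageCodec('png'), False),
])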
dmeranda/demjson
5bc65974e7141746acc88c581f5d2dfb8ea14064
demjson.py
python
JSON.__sanity_check_start
(self, state)
return is_sane
Check that the document seems sane by looking at the first couple characters.

Check that the decoding seems sane.  Per RFC 4627 section 3:
    "Since the first two characters of a JSON text will
    always be ASCII characters [RFC0020], ..."
[WAS removed from RFC 7158, but still valid via the grammar.]

This check is probably not necessary, but it allows us to
raise a suitably descriptive error rather than an obscure
syntax error later on.

Note that the RFC requirements of two ASCII characters seems
to be an incorrect statement as a JSON string literal may have
as it's first character any unicode character.  Thus the first
two characters will always be ASCII, unless the first
character is a quotation mark.  And in non-strict mode we can
also have a few other characters too.
Check that the document seems sane by looking at the first couple characters.
[ "Check", "that", "the", "document", "seems", "sane", "by", "looking", "at", "the", "first", "couple", "characters", "." ]
def __sanity_check_start(self, state):
    """Check that the document seems sane by looking at the first couple characters.

    Check that the decoding seems sane.  Per RFC 4627 section 3:
        "Since the first two characters of a JSON text will
        always be ASCII characters [RFC0020], ..."
    [WAS removed from RFC 7158, but still valid via the grammar.]

    This check is probably not necessary, but it allows us to
    raise a suitably descriptive error rather than an obscure
    syntax error later on.

    Note that the RFC requirements of two ASCII characters seems
    to be an incorrect statement as a JSON string literal may have
    as it's first character any unicode character.  Thus the first
    two characters will always be ASCII, unless the first
    character is a quotation mark.  And in non-strict mode we can
    also have a few other characters too.
    """
    is_sane = True
    unitxt = state.buf.peekstr(2)
    if len(unitxt) >= 2:
        first, second = unitxt[:2]
        if first in self._string_quotes:
            pass  # second can be anything inside string literal
        else:
            if ((ord(first) < 0x20 or ord(first) > 0x7f) or \
                (ord(second) < 0x20 or ord(second) > 0x7f)) and \
                (not self.isws(first) and not self.isws(second)):
                # Found non-printable ascii, must check unicode
                # categories to see if the character is legal.
                # Only whitespace, line and paragraph separators,
                # and format control chars are legal here.
                import unicodedata
                catfirst = unicodedata.category(unicode(first))
                catsecond = unicodedata.category(unicode(second))
                if catfirst not in ('Zs','Zl','Zp','Cf') or \
                       catsecond not in ('Zs','Zl','Zp','Cf'):
                    state.push_fatal( 'The input is gibberish, is the Unicode encoding correct?' )
    return is_sane
[ "def", "__sanity_check_start", "(", "self", ",", "state", ")", ":", "is_sane", "=", "True", "unitxt", "=", "state", ".", "buf", ".", "peekstr", "(", "2", ")", "if", "len", "(", "unitxt", ")", ">=", "2", ":", "first", ",", "second", "=", "unitxt", "[", ":", "2", "]", "if", "first", "in", "self", ".", "_string_quotes", ":", "pass", "# second can be anything inside string literal", "else", ":", "if", "(", "(", "ord", "(", "first", ")", "<", "0x20", "or", "ord", "(", "first", ")", ">", "0x7f", ")", "or", "(", "ord", "(", "second", ")", "<", "0x20", "or", "ord", "(", "second", ")", ">", "0x7f", ")", ")", "and", "(", "not", "self", ".", "isws", "(", "first", ")", "and", "not", "self", ".", "isws", "(", "second", ")", ")", ":", "# Found non-printable ascii, must check unicode", "# categories to see if the character is legal.", "# Only whitespace, line and paragraph separators,", "# and format control chars are legal here.", "import", "unicodedata", "catfirst", "=", "unicodedata", ".", "category", "(", "unicode", "(", "first", ")", ")", "catsecond", "=", "unicodedata", ".", "category", "(", "unicode", "(", "second", ")", ")", "if", "catfirst", "not", "in", "(", "'Zs'", ",", "'Zl'", ",", "'Zp'", ",", "'Cf'", ")", "or", "catsecond", "not", "in", "(", "'Zs'", ",", "'Zl'", ",", "'Zp'", ",", "'Cf'", ")", ":", "state", ".", "push_fatal", "(", "'The input is gibberish, is the Unicode encoding correct?'", ")", "return", "is_sane" ]
https://github.com/dmeranda/demjson/blob/5bc65974e7141746acc88c581f5d2dfb8ea14064/demjson.py#L4923-L4963
microsoft/unilm
65f15af2a307ebb64cfb25adf54375b002e6fe8d
xtune/src/transformers/data/processors/glue.py
python
RteProcessor.get_example_from_tensor_dict
(self, tensor_dict)
return InputExample( tensor_dict["idx"].numpy(), tensor_dict["sentence1"].numpy().decode("utf-8"), tensor_dict["sentence2"].numpy().decode("utf-8"), str(tensor_dict["label"].numpy()), )
See base class.
See base class.
[ "See", "base", "class", "." ]
def get_example_from_tensor_dict(self, tensor_dict):
    """See base class."""
    return InputExample(
        tensor_dict["idx"].numpy(),
        tensor_dict["sentence1"].numpy().decode("utf-8"),
        tensor_dict["sentence2"].numpy().decode("utf-8"),
        str(tensor_dict["label"].numpy()),
    )
[ "def", "get_example_from_tensor_dict", "(", "self", ",", "tensor_dict", ")", ":", "return", "InputExample", "(", "tensor_dict", "[", "\"idx\"", "]", ".", "numpy", "(", ")", ",", "tensor_dict", "[", "\"sentence1\"", "]", ".", "numpy", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ",", "tensor_dict", "[", "\"sentence2\"", "]", ".", "numpy", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ",", "str", "(", "tensor_dict", "[", "\"label\"", "]", ".", "numpy", "(", ")", ")", ",", ")" ]
https://github.com/microsoft/unilm/blob/65f15af2a307ebb64cfb25adf54375b002e6fe8d/xtune/src/transformers/data/processors/glue.py#L446-L453
bugy/script-server
9a57ce15903c81bcb537b872f1330ee55ba31563
src/react/observable.py
python
Observable.__init__
(self)
[]
def __init__(self):
    super().__init__()
[ "def", "__init__", "(", "self", ")", ":", "super", "(", ")", ".", "__init__", "(", ")" ]
https://github.com/bugy/script-server/blob/9a57ce15903c81bcb537b872f1330ee55ba31563/src/react/observable.py#L104-L105
scikit-learn/scikit-learn
1d1aadd0711b87d2a11c80aad15df6f8cf156712
examples/applications/plot_model_complexity_influence.py
python
benchmark_influence
(conf)
return prediction_powers, prediction_times, complexities
Benchmark influence of `changing_param` on both MSE and latency.
Benchmark influence of `changing_param` on both MSE and latency.
[ "Benchmark", "influence", "of", "changing_param", "on", "both", "MSE", "and", "latency", "." ]
def benchmark_influence(conf):
    """
    Benchmark influence of `changing_param` on both MSE and latency.
    """
    prediction_times = []
    prediction_powers = []
    complexities = []
    for param_value in conf["changing_param_values"]:
        conf["tuned_params"][conf["changing_param"]] = param_value
        estimator = conf["estimator"](**conf["tuned_params"])

        print("Benchmarking %s" % estimator)
        estimator.fit(conf["data"]["X_train"], conf["data"]["y_train"])
        conf["postfit_hook"](estimator)
        complexity = conf["complexity_computer"](estimator)
        complexities.append(complexity)
        start_time = time.time()
        for _ in range(conf["n_samples"]):
            y_pred = estimator.predict(conf["data"]["X_test"])
        elapsed_time = (time.time() - start_time) / float(conf["n_samples"])
        prediction_times.append(elapsed_time)
        pred_score = conf["prediction_performance_computer"](
            conf["data"]["y_test"], y_pred
        )
        prediction_powers.append(pred_score)
        print(
            "Complexity: %d | %s: %.4f | Pred. Time: %fs\n"
            % (
                complexity,
                conf["prediction_performance_label"],
                pred_score,
                elapsed_time,
            )
        )
    return prediction_powers, prediction_times, complexities
[ "def", "benchmark_influence", "(", "conf", ")", ":", "prediction_times", "=", "[", "]", "prediction_powers", "=", "[", "]", "complexities", "=", "[", "]", "for", "param_value", "in", "conf", "[", "\"changing_param_values\"", "]", ":", "conf", "[", "\"tuned_params\"", "]", "[", "conf", "[", "\"changing_param\"", "]", "]", "=", "param_value", "estimator", "=", "conf", "[", "\"estimator\"", "]", "(", "*", "*", "conf", "[", "\"tuned_params\"", "]", ")", "print", "(", "\"Benchmarking %s\"", "%", "estimator", ")", "estimator", ".", "fit", "(", "conf", "[", "\"data\"", "]", "[", "\"X_train\"", "]", ",", "conf", "[", "\"data\"", "]", "[", "\"y_train\"", "]", ")", "conf", "[", "\"postfit_hook\"", "]", "(", "estimator", ")", "complexity", "=", "conf", "[", "\"complexity_computer\"", "]", "(", "estimator", ")", "complexities", ".", "append", "(", "complexity", ")", "start_time", "=", "time", ".", "time", "(", ")", "for", "_", "in", "range", "(", "conf", "[", "\"n_samples\"", "]", ")", ":", "y_pred", "=", "estimator", ".", "predict", "(", "conf", "[", "\"data\"", "]", "[", "\"X_test\"", "]", ")", "elapsed_time", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "/", "float", "(", "conf", "[", "\"n_samples\"", "]", ")", "prediction_times", ".", "append", "(", "elapsed_time", ")", "pred_score", "=", "conf", "[", "\"prediction_performance_computer\"", "]", "(", "conf", "[", "\"data\"", "]", "[", "\"y_test\"", "]", ",", "y_pred", ")", "prediction_powers", ".", "append", "(", "pred_score", ")", "print", "(", "\"Complexity: %d | %s: %.4f | Pred. Time: %fs\\n\"", "%", "(", "complexity", ",", "conf", "[", "\"prediction_performance_label\"", "]", ",", "pred_score", ",", "elapsed_time", ",", ")", ")", "return", "prediction_powers", ",", "prediction_times", ",", "complexities" ]
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/examples/applications/plot_model_complexity_influence.py#L103-L137
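benchmark_influence drives everything through the conf dict; a minimal runnable sketch of the expected keys, with an illustrative Ridge estimator and synthetic data (it assumes numpy, scikit-learn, and the time import used by the function are available):

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error

rng = np.random.RandomState(0)
X = rng.rand(200, 5)
y = X @ rng.rand(5) + 0.01 * rng.randn(200)

conf = {
    "estimator": Ridge,
    "tuned_params": {},
    "changing_param": "alpha",
    "changing_param_values": [0.01, 0.1, 1.0],
    "complexity_computer": lambda est: est.coef_.size,
    "prediction_performance_computer": mean_squared_error,
    "prediction_performance_label": "MSE",
    "postfit_hook": lambda est: None,
    "data": {"X_train": X[:150], "y_train": y[:150],
             "X_test": X[150:], "y_test": y[150:]},
    "n_samples": 30,
}
powers, times, complexities = benchmark_influence(conf)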
gleeda/memtriage
c24f4859995cccb9d88ccc0118d90693019cc1d5
volatility/volatility/plugins/overlays/windows/kpcr_vtypes.py
python
_KPCROnx86.get_kdbg
(self)
return DebuggerDataList.dereference().dereference_as("_KDDEBUGGER_DATA64")
Find this CPUs KDBG. Please note the KdVersionBlock pointer is NULL on all KPCR structures except the one for the first CPU. In some cases on x64, even the first CPU has a NULL KdVersionBlock, so this is really a hit-or-miss.
Find this CPUs KDBG.
[ "Find", "this", "CPUs", "KDBG", "." ]
def get_kdbg(self):
    """Find this CPUs KDBG.

    Please note the KdVersionBlock pointer is NULL on all KPCR structures
    except the one for the first CPU. In some cases on x64, even the
    first CPU has a NULL KdVersionBlock, so this is really a hit-or-miss.
    """
    DebuggerDataList = self.KdVersionBlock.dereference_as("_DBGKD_GET_VERSION64").DebuggerDataList

    # DebuggerDataList is a pointer to unsigned long on x86
    # and a pointer to unsigned long long on x64. The first
    # dereference() dereferences the pointer, and the second
    # dereference() dereferences the unsigned long or long long
    # as the actual KDBG address.
    return DebuggerDataList.dereference().dereference_as("_KDDEBUGGER_DATA64")
[ "def", "get_kdbg", "(", "self", ")", ":", "DebuggerDataList", "=", "self", ".", "KdVersionBlock", ".", "dereference_as", "(", "\"_DBGKD_GET_VERSION64\"", ")", ".", "DebuggerDataList", "# DebuggerDataList is a pointer to unsigned long on x86 ", "# and a pointer to unsigned long long on x64. The first ", "# dereference() dereferences the pointer, and the second ", "# dereference() dereferences the unsigned long or long long", "# as the actual KDBG address. ", "return", "DebuggerDataList", ".", "dereference", "(", ")", ".", "dereference_as", "(", "\"_KDDEBUGGER_DATA64\"", ")" ]
https://github.com/gleeda/memtriage/blob/c24f4859995cccb9d88ccc0118d90693019cc1d5/volatility/volatility/plugins/overlays/windows/kpcr_vtypes.py#L34-L49
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/docutils/nodes.py
python
Node.walk
(self, visitor)
return stop
Traverse a tree of `Node` objects, calling the
`dispatch_visit()` method of `visitor` when entering each
node.  (The `walkabout()` method is similar, except it also
calls the `dispatch_departure()` method before exiting each
node.)

This tree traversal supports limited in-place tree
modifications.  Replacing one node with one or more nodes is
OK, as is removing an element.  However, if the node removed
or replaced occurs after the current node, the old node will
still be traversed, and any new nodes will not.

Within ``visit`` methods (and ``depart`` methods for
`walkabout()`), `TreePruningException` subclasses may be raised
(`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).

Parameter `visitor`: A `NodeVisitor` object, containing a
``visit`` implementation for each `Node` subclass encountered.

Return true if we should stop the traversal.
Traverse a tree of `Node` objects, calling the `dispatch_visit()` method of `visitor` when entering each node. (The `walkabout()` method is similar, except it also calls the `dispatch_departure()` method before exiting each node.)
[ "Traverse", "a", "tree", "of", "Node", "objects", "calling", "the", "dispatch_visit", "()", "method", "of", "visitor", "when", "entering", "each", "node", ".", "(", "The", "walkabout", "()", "method", "is", "similar", "except", "it", "also", "calls", "the", "dispatch_departure", "()", "method", "before", "exiting", "each", "node", ".", ")" ]
def walk(self, visitor):
    """
    Traverse a tree of `Node` objects, calling the
    `dispatch_visit()` method of `visitor` when entering each
    node.  (The `walkabout()` method is similar, except it also
    calls the `dispatch_departure()` method before exiting each
    node.)

    This tree traversal supports limited in-place tree
    modifications.  Replacing one node with one or more nodes is
    OK, as is removing an element.  However, if the node removed
    or replaced occurs after the current node, the old node will
    still be traversed, and any new nodes will not.

    Within ``visit`` methods (and ``depart`` methods for
    `walkabout()`), `TreePruningException` subclasses may be raised
    (`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).

    Parameter `visitor`: A `NodeVisitor` object, containing a
    ``visit`` implementation for each `Node` subclass encountered.

    Return true if we should stop the traversal.
    """
    stop = False
    visitor.document.reporter.debug(
        'docutils.nodes.Node.walk calling dispatch_visit for %s'
        % self.__class__.__name__)
    try:
        try:
            visitor.dispatch_visit(self)
        except (SkipChildren, SkipNode):
            return stop
        except SkipDeparture:           # not applicable; ignore
            pass
        children = self.children
        try:
            for child in children[:]:
                if child.walk(visitor):
                    stop = True
                    break
        except SkipSiblings:
            pass
    except StopTraversal:
        stop = True
    return stop
[ "def", "walk", "(", "self", ",", "visitor", ")", ":", "stop", "=", "False", "visitor", ".", "document", ".", "reporter", ".", "debug", "(", "'docutils.nodes.Node.walk calling dispatch_visit for %s'", "%", "self", ".", "__class__", ".", "__name__", ")", "try", ":", "try", ":", "visitor", ".", "dispatch_visit", "(", "self", ")", "except", "(", "SkipChildren", ",", "SkipNode", ")", ":", "return", "stop", "except", "SkipDeparture", ":", "# not applicable; ignore", "pass", "children", "=", "self", ".", "children", "try", ":", "for", "child", "in", "children", "[", ":", "]", ":", "if", "child", ".", "walk", "(", "visitor", ")", ":", "stop", "=", "True", "break", "except", "SkipSiblings", ":", "pass", "except", "StopTraversal", ":", "stop", "=", "True", "return", "stop" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/docutils/nodes.py#L101-L145
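A minimal sketch of driving walk() with a visitor, using docutils' public API (the TextCollector class is illustrative):

from docutils.core import publish_doctree
from docutils import nodes

class TextCollector(nodes.SparseNodeVisitor):
    # Collect the raw text of every Text node in the tree.
    def __init__(self, document):
        super().__init__(document)
        self.chunks = []

    def visit_Text(self, node):
        self.chunks.append(node.astext())

doctree = publish_doctree("Hello, *world*!")
collector = TextCollector(doctree)
doctree.walk(collector)
print(" ".join(collector.chunks))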
khanhnamle1994/natural-language-processing
01d450d5ac002b0156ef4cf93a07cb508c1bcdc5
assignment1/.env/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
python
TarFile.open
(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs)
Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.

mode:
'r' or 'r:*' open for reading with transparent compression
'r:'         open for reading exclusively uncompressed
'r:gz'       open for reading with gzip compression
'r:bz2'      open for reading with bzip2 compression
'a' or 'a:'  open for appending, creating the file if necessary
'w' or 'w:'  open for writing without compression
'w:gz'       open for writing with gzip compression
'w:bz2'      open for writing with bzip2 compression

'r|*'        open a stream of tar blocks with transparent compression
'r|'         open an uncompressed stream of tar blocks for reading
'r|gz'       open a gzip compressed stream of tar blocks
'r|bz2'      open a bzip2 compressed stream of tar blocks
'w|'         open an uncompressed stream for writing
'w|gz'       open a gzip compressed stream for writing
'w|bz2'      open a bzip2 compressed stream for writing
Open a tar archive for reading, writing or appending. Return an appropriate TarFile class.
[ "Open", "a", "tar", "archive", "for", "reading", "writing", "or", "appending", ".", "Return", "an", "appropriate", "TarFile", "class", "." ]
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
    """Open a tar archive for reading, writing or appending. Return
    an appropriate TarFile class.

    mode:
    'r' or 'r:*' open for reading with transparent compression
    'r:'         open for reading exclusively uncompressed
    'r:gz'       open for reading with gzip compression
    'r:bz2'      open for reading with bzip2 compression
    'a' or 'a:'  open for appending, creating the file if necessary
    'w' or 'w:'  open for writing without compression
    'w:gz'       open for writing with gzip compression
    'w:bz2'      open for writing with bzip2 compression

    'r|*'        open a stream of tar blocks with transparent compression
    'r|'         open an uncompressed stream of tar blocks for reading
    'r|gz'       open a gzip compressed stream of tar blocks
    'r|bz2'      open a bzip2 compressed stream of tar blocks
    'w|'         open an uncompressed stream for writing
    'w|gz'       open a gzip compressed stream for writing
    'w|bz2'      open a bzip2 compressed stream for writing
    """

    if not name and not fileobj:
        raise ValueError("nothing to open")

    if mode in ("r", "r:*"):
        # Find out which *open() is appropriate for opening the file.
        for comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
            if fileobj is not None:
                saved_pos = fileobj.tell()
            try:
                return func(name, "r", fileobj, **kwargs)
            except (ReadError, CompressionError) as e:
                if fileobj is not None:
                    fileobj.seek(saved_pos)
                continue
        raise ReadError("file could not be opened successfully")

    elif ":" in mode:
        filemode, comptype = mode.split(":", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"

        # Select the *open() function according to
        # given compression.
        if comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
        else:
            raise CompressionError("unknown compression type %r" % comptype)
        return func(name, filemode, fileobj, **kwargs)

    elif "|" in mode:
        filemode, comptype = mode.split("|", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"

        if filemode not in "rw":
            raise ValueError("mode must be 'r' or 'w'")

        stream = _Stream(name, filemode, comptype, fileobj, bufsize)
        try:
            t = cls(name, filemode, stream, **kwargs)
        except:
            stream.close()
            raise
        t._extfileobj = False
        return t

    elif mode in "aw":
        return cls.taropen(name, mode, fileobj, **kwargs)

    raise ValueError("undiscernible mode")
[ "def", "open", "(", "cls", ",", "name", "=", "None", ",", "mode", "=", "\"r\"", ",", "fileobj", "=", "None", ",", "bufsize", "=", "RECORDSIZE", ",", "*", "*", "kwargs", ")", ":", "if", "not", "name", "and", "not", "fileobj", ":", "raise", "ValueError", "(", "\"nothing to open\"", ")", "if", "mode", "in", "(", "\"r\"", ",", "\"r:*\"", ")", ":", "# Find out which *open() is appropriate for opening the file.", "for", "comptype", "in", "cls", ".", "OPEN_METH", ":", "func", "=", "getattr", "(", "cls", ",", "cls", ".", "OPEN_METH", "[", "comptype", "]", ")", "if", "fileobj", "is", "not", "None", ":", "saved_pos", "=", "fileobj", ".", "tell", "(", ")", "try", ":", "return", "func", "(", "name", ",", "\"r\"", ",", "fileobj", ",", "*", "*", "kwargs", ")", "except", "(", "ReadError", ",", "CompressionError", ")", "as", "e", ":", "if", "fileobj", "is", "not", "None", ":", "fileobj", ".", "seek", "(", "saved_pos", ")", "continue", "raise", "ReadError", "(", "\"file could not be opened successfully\"", ")", "elif", "\":\"", "in", "mode", ":", "filemode", ",", "comptype", "=", "mode", ".", "split", "(", "\":\"", ",", "1", ")", "filemode", "=", "filemode", "or", "\"r\"", "comptype", "=", "comptype", "or", "\"tar\"", "# Select the *open() function according to", "# given compression.", "if", "comptype", "in", "cls", ".", "OPEN_METH", ":", "func", "=", "getattr", "(", "cls", ",", "cls", ".", "OPEN_METH", "[", "comptype", "]", ")", "else", ":", "raise", "CompressionError", "(", "\"unknown compression type %r\"", "%", "comptype", ")", "return", "func", "(", "name", ",", "filemode", ",", "fileobj", ",", "*", "*", "kwargs", ")", "elif", "\"|\"", "in", "mode", ":", "filemode", ",", "comptype", "=", "mode", ".", "split", "(", "\"|\"", ",", "1", ")", "filemode", "=", "filemode", "or", "\"r\"", "comptype", "=", "comptype", "or", "\"tar\"", "if", "filemode", "not", "in", "\"rw\"", ":", "raise", "ValueError", "(", "\"mode must be 'r' or 'w'\"", ")", "stream", "=", "_Stream", "(", "name", ",", "filemode", ",", "comptype", ",", "fileobj", ",", "bufsize", ")", "try", ":", "t", "=", "cls", "(", "name", ",", "filemode", ",", "stream", ",", "*", "*", "kwargs", ")", "except", ":", "stream", ".", "close", "(", ")", "raise", "t", ".", "_extfileobj", "=", "False", "return", "t", "elif", "mode", "in", "\"aw\"", ":", "return", "cls", ".", "taropen", "(", "name", ",", "mode", ",", "fileobj", ",", "*", "*", "kwargs", ")", "raise", "ValueError", "(", "\"undiscernible mode\"", ")" ]
https://github.com/khanhnamle1994/natural-language-processing/blob/01d450d5ac002b0156ef4cf93a07cb508c1bcdc5/assignment1/.env/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L1714-L1787
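The same mode strings work with the standard-library tarfile module that this backport mirrors; a quick usage sketch (the file names are hypothetical):

import tarfile

# 'r' / 'r:*' opens for reading with transparent compression detection.
with tarfile.open("example.tar.gz") as tf:
    print(tf.getnames())

# 'w:gz' writes a gzip-compressed archive explicitly.
with tarfile.open("out.tar.gz", "w:gz") as tf:
    tf.add("example.txt")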
mandarjoshi90/coref
bd04f2e19b9dcc0b8bba848a335e4af3be50741c
conll-2012/v3/scripts/conll2name.py
python
load_config
(cfg_name=None, config_append=[])
return config
Load a configuration file to memory.

The given configuration file name can be a full path, in which
case we simply read that configuration file.  Otherwise, if you
give 'myconfig' or something similar, we look in the current
directory and the home directory.  We also look to see if files
with this name and extension '.conf' exist.  So for 'myconfig' we
would look in the following places:

 * ./myconfig
 * ./myconfig.conf
 * [home]/.myconfig
 * [home]/.myconfig.conf

Once we find the configuration, we load it.  We also extend
ConfigParser to support ``[]`` notation.  So you could look up key
``k`` in section ``s`` with ``config[s,k]``.  See
:func:`FancyConfigParser` .

If config_append is set we use :func:`parse_cfg_args` and add any
values it creates to the config object.  These values override any
previous ones.
Load a configuration file to memory.
[ "Load", "a", "configuration", "file", "to", "memory", "." ]
def load_config(cfg_name=None, config_append=[]):
    """ Load a configuration file to memory.

    The given configuration file name can be a full path, in which
    case we simply read that configuration file.  Otherwise, if you
    give 'myconfig' or something similar, we look in the current
    directory and the home directory.  We also look to see if files
    with this name and extension '.conf' exist.  So for 'myconfig' we
    would look in the following places:

     * ./myconfig
     * ./myconfig.conf
     * [home]/.myconfig
     * [home]/.myconfig.conf

    Once we find the configuration, we load it.  We also extend
    ConfigParser to support ``[]`` notation.  So you could look up key
    ``k`` in section ``s`` with ``config[s,k]``.  See
    :func:`FancyConfigParser` .

    If config_append is set we use :func:`parse_cfg_args` and add any
    values it creates to the config object.  These values override any
    previous ones.
    """
    config = FancyConfigParser()

    if cfg_name:
        config_locs = [cfg_name + '.conf',
                       os.path.expanduser('~/.' + cfg_name + '.conf'),
                       cfg_name,
                       os.path.expanduser('~/.' + cfg_name)]
        l = config.read(config_locs)
        if not l:
            raise Exception("Couldn't find config file.  Looked in:" +
                            "".join(["\n - " + c for c in config_locs]) +
                            "\nto no avail.")

    for (section, key_name), value in parse_cfg_args(config_append).iteritems():
        if not config.has_section(section):
            config.add_section(section)
        config.set(section, key_name, value)

    problems = []
    for section in config.sections():
        if not is_config_section_registered(section):
            on.common.log.status("Ignoring unknown configuration section", section)
            continue
        for option in config.options(section):
            if not is_config_registered(section, option):
                problems.append("Unknown configuration variable %s.%s"
                                % (section, option))
                continue

            value = config.get(section, option)
            allowed = allowed_config_values(section, option)
            multiple = allow_multiple_config_values(section, option)

            values = value.split() if multiple else [value]
            for value in values:
                if allowed and not value in allowed:
                    problems.append("Illegal value '%s' for configuration variable %s.%s.  Permitted values are: %s"
                                    % (value, section, option,
                                       ", ".join(["'%s'" % x for x in allowed])))

        for option in required_config_options(section):
            if not config.has_option(section, option):
                problems.append("Required configuration variable %s.%s is absent"
                                % (section, option))

    for section in required_config_sections():
        if not config.has_section(section):
            problems.append("Required configuration section %s is absent"
                            % section)

    if problems:
        print_config_docs()

        on.common.log.status("Configuration Problems:")
        for problem in problems:
            on.common.log.status("  " + problem)

        sys.exit(-1)

    return config
[ "def", "load_config", "(", "cfg_name", "=", "None", ",", "config_append", "=", "[", "]", ")", ":", "config", "=", "FancyConfigParser", "(", ")", "if", "cfg_name", ":", "config_locs", "=", "[", "cfg_name", "+", "'.conf'", ",", "os", ".", "path", ".", "expanduser", "(", "'~/.'", "+", "cfg_name", "+", "'.conf'", ")", ",", "cfg_name", ",", "os", ".", "path", ".", "expanduser", "(", "'~/.'", "+", "cfg_name", ")", "]", "l", "=", "config", ".", "read", "(", "config_locs", ")", "if", "not", "l", ":", "raise", "Exception", "(", "\"Couldn't find config file. Looked in:\"", "+", "\"\"", ".", "join", "(", "[", "\"\\n - \"", "+", "c", "for", "c", "in", "config_locs", "]", ")", "+", "\"\\nto no avail.\"", ")", "for", "(", "section", ",", "key_name", ")", ",", "value", "in", "parse_cfg_args", "(", "config_append", ")", ".", "iteritems", "(", ")", ":", "if", "not", "config", ".", "has_section", "(", "section", ")", ":", "config", ".", "add_section", "(", "section", ")", "config", ".", "set", "(", "section", ",", "key_name", ",", "value", ")", "problems", "=", "[", "]", "for", "section", "in", "config", ".", "sections", "(", ")", ":", "if", "not", "is_config_section_registered", "(", "section", ")", ":", "on", ".", "common", ".", "log", ".", "status", "(", "\"Ignoring unknown configuration section\"", ",", "section", ")", "continue", "for", "option", "in", "config", ".", "options", "(", "section", ")", ":", "if", "not", "is_config_registered", "(", "section", ",", "option", ")", ":", "problems", ".", "append", "(", "\"Unknown configuration variable %s.%s\"", "%", "(", "section", ",", "option", ")", ")", "continue", "value", "=", "config", ".", "get", "(", "section", ",", "option", ")", "allowed", "=", "allowed_config_values", "(", "section", ",", "option", ")", "multiple", "=", "allow_multiple_config_values", "(", "section", ",", "option", ")", "values", "=", "value", ".", "split", "(", ")", "if", "multiple", "else", "[", "value", "]", "for", "value", "in", "values", ":", "if", "allowed", "and", "not", "value", "in", "allowed", ":", "problems", ".", "append", "(", "\"Illegal value '%s' for configuration variable %s.%s. Permitted values are: %s\"", "%", "(", "value", ",", "section", ",", "option", ",", "\", \"", ".", "join", "(", "[", "\"'%s'\"", "%", "x", "for", "x", "in", "allowed", "]", ")", ")", ")", "for", "option", "in", "required_config_options", "(", "section", ")", ":", "if", "not", "config", ".", "has_option", "(", "section", ",", "option", ")", ":", "problems", ".", "append", "(", "\"Required configuration variable %s.%s is absent\"", "%", "(", "section", ",", "option", ")", ")", "for", "section", "in", "required_config_sections", "(", ")", ":", "if", "not", "config", ".", "has_section", "(", "section", ")", ":", "problems", ".", "append", "(", "\"Required configuration section %s is absent\"", "%", "section", ")", "if", "problems", ":", "print_config_docs", "(", ")", "on", ".", "common", ".", "log", ".", "status", "(", "\"Configuration Problems:\"", ")", "for", "problem", "in", "problems", ":", "on", ".", "common", ".", "log", ".", "status", "(", "\" \"", "+", "problem", ")", "sys", ".", "exit", "(", "-", "1", ")", "return", "config" ]
https://github.com/mandarjoshi90/coref/blob/bd04f2e19b9dcc0b8bba848a335e4af3be50741c/conll-2012/v3/scripts/conll2name.py#L257-L339
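A minimal usage sketch for load_config above. The config name 'myconfig', the section/option names, and the 'section.key=value' override format handed to parse_cfg_args are illustrative assumptions, not taken from the source.

    # Hedged sketch: load_config resolves ./myconfig, ./myconfig.conf,
    # ~/.myconfig and ~/.myconfig.conf, then layers overrides on top.
    config = load_config('myconfig',
                         config_append=['corpus.data_in=/tmp/data'])  # override format assumed
    print(config['corpus', 'data_in'])  # FancyConfigParser adds [section, key] lookup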
plaid/plaid-python
8c60fca608e426f3ff30da8857775946d29e122c
plaid/model/income_verification_status_webhook.py
python
IncomeVerificationStatusWebhook.openapi_types
()
return { 'webhook_type': (str,), # noqa: E501 'webhook_code': (str,), # noqa: E501 'income_verification_id': (str,), # noqa: E501 'item_id': (str,), # noqa: E501 'verification_status': (str,), # noqa: E501 }
This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.
This must be a method because a model may have properties that are of type self, this must run after the class is loaded
[ "This", "must", "be", "a", "method", "because", "a", "model", "may", "have", "properties", "that", "are", "of", "type", "self", "this", "must", "run", "after", "the", "class", "is", "loaded" ]
def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { 'webhook_type': (str,), # noqa: E501 'webhook_code': (str,), # noqa: E501 'income_verification_id': (str,), # noqa: E501 'item_id': (str,), # noqa: E501 'verification_status': (str,), # noqa: E501 }
[ "def", "openapi_types", "(", ")", ":", "return", "{", "'webhook_type'", ":", "(", "str", ",", ")", ",", "# noqa: E501", "'webhook_code'", ":", "(", "str", ",", ")", ",", "# noqa: E501", "'income_verification_id'", ":", "(", "str", ",", ")", ",", "# noqa: E501", "'item_id'", ":", "(", "str", ",", ")", ",", "# noqa: E501", "'verification_status'", ":", "(", "str", ",", ")", ",", "# noqa: E501", "}" ]
https://github.com/plaid/plaid-python/blob/8c60fca608e426f3ff30da8857775946d29e122c/plaid/model/income_verification_status_webhook.py#L69-L84
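The docstring above hinges on a Python scoping detail worth making concrete: a class attribute referencing the class itself would fail while the class body executes, but a method resolves the name lazily. A minimal sketch with a hypothetical self-referencing model:

    class Node:
        @staticmethod
        def openapi_types():
            # 'Node' resolves at call time; a plain class attribute
            # {'child': (Node,)} would raise NameError in the class body.
            return {'child': (Node,)}

    print(Node.openapi_types())  # {'child': (<class '__main__.Node'>,)}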
python-cmd2/cmd2
c1f6114d52161a3b8a32d3cee1c495d79052e1fb
cmd2/py_bridge.py
python
PyBridge.__dir__
(self)
return attributes
Return a custom set of attribute names
Return a custom set of attribute names
[ "Return", "a", "custom", "set", "of", "attribute", "names" ]
def __dir__(self) -> List[str]: """Return a custom set of attribute names""" attributes: List[str] = [] attributes.insert(0, 'cmd_echo') return attributes
[ "def", "__dir__", "(", "self", ")", "->", "List", "[", "str", "]", ":", "attributes", ":", "List", "[", "str", "]", "=", "[", "]", "attributes", ".", "insert", "(", "0", ",", "'cmd_echo'", ")", "return", "attributes" ]
https://github.com/python-cmd2/cmd2/blob/c1f6114d52161a3b8a32d3cee1c495d79052e1fb/cmd2/py_bridge.py#L95-L99
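PyBridge narrows dir() down to the names it wants to advertise inside the embedded interpreter; the standalone sketch below shows the underlying __dir__ hook (the class name is hypothetical).

    class Bridge:
        def __dir__(self):
            return ['cmd_echo']  # dir() sorts whatever list we return

    print(dir(Bridge()))  # ['cmd_echo']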
nucleic/enaml
65c2a2a2d765e88f2e1103046680571894bb41ed
enaml/layout/geometry.py
python
BaseBox.right
(self)
return self[1]
The 'right' component of the box.
The 'right' component of the box.
[ "The", "right", "component", "of", "the", "box", "." ]
def right(self): """ The 'right' component of the box. """ return self[1]
[ "def", "right", "(", "self", ")", ":", "return", "self", "[", "1", "]" ]
https://github.com/nucleic/enaml/blob/65c2a2a2d765e88f2e1103046680571894bb41ed/enaml/layout/geometry.py#L193-L197
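BaseBox evidently stores its components positionally in a tuple, so 'right' is simply index 1. A standalone sketch of that pattern follows; the (top, right, bottom, left) field order is an assumption consistent with index 1, not confirmed by the enaml source.

    class Box(tuple):
        def __new__(cls, top, right, bottom, left):
            return super().__new__(cls, (top, right, bottom, left))

        @property
        def right(self):
            return self[1]  # mirrors BaseBox.right above

    print(Box(0, 10, 20, 5).right)  # 10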
jorgebastida/django-dajaxice
bf41c7623804856326b09f0724b4cb7d14440d7e
dajaxice/core/Dajaxice.py
python
Dajaxice.clean_method
(self, method)
return method
Clean the http method.
Clean the http method.
[ "Clean", "the", "http", "method", "." ]
def clean_method(self, method): """ Clean the http method. """ method = method.upper() if method not in ['GET', 'POST']: method = 'POST' return method
[ "def", "clean_method", "(", "self", ",", "method", ")", ":", "method", "=", "method", ".", "upper", "(", ")", "if", "method", "not", "in", "[", "'GET'", ",", "'POST'", "]", ":", "method", "=", "'POST'", "return", "method" ]
https://github.com/jorgebastida/django-dajaxice/blob/bf41c7623804856326b09f0724b4cb7d14440d7e/dajaxice/core/Dajaxice.py#L86-L91
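clean_method is small enough to pin down with a worked example; the standalone copy below mirrors the method body so it can run without a Dajaxice instance.

    def clean_method(method):
        method = method.upper()
        if method not in ['GET', 'POST']:
            method = 'POST'
        return method

    assert clean_method('get') == 'GET'
    assert clean_method('delete') == 'POST'  # anything but GET/POST falls back to POST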
Alexey-T/CudaText
6a8b9a974c5d5029c6c273bde83198c83b3a5fb9
app/py/sys/urllib3/poolmanager.py
python
ProxyManager.urlopen
(self, method, url, redirect=True, **kw)
return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.
Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.
[ "Same", "as", "HTTP", "(", "S", ")", "ConnectionPool", ".", "urlopen", "url", "must", "be", "absolute", "." ]
def urlopen(self, method, url, redirect=True, **kw): "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." u = parse_url(url) if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme): # For connections using HTTP CONNECT, httplib sets the necessary # headers on the CONNECT to the proxy. If we're not using CONNECT, # we'll definitely need to set 'Host' at the very least. headers = kw.get("headers", self.headers) kw["headers"] = self._set_proxy_headers(url, headers) return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
[ "def", "urlopen", "(", "self", ",", "method", ",", "url", ",", "redirect", "=", "True", ",", "*", "*", "kw", ")", ":", "u", "=", "parse_url", "(", "url", ")", "if", "not", "connection_requires_http_tunnel", "(", "self", ".", "proxy", ",", "self", ".", "proxy_config", ",", "u", ".", "scheme", ")", ":", "# For connections using HTTP CONNECT, httplib sets the necessary", "# headers on the CONNECT to the proxy. If we're not using CONNECT,", "# we'll definitely need to set 'Host' at the very least.", "headers", "=", "kw", ".", "get", "(", "\"headers\"", ",", "self", ".", "headers", ")", "kw", "[", "\"headers\"", "]", "=", "self", ".", "_set_proxy_headers", "(", "url", ",", "headers", ")", "return", "super", "(", "ProxyManager", ",", "self", ")", ".", "urlopen", "(", "method", ",", "url", ",", "redirect", "=", "redirect", ",", "*", "*", "kw", ")" ]
https://github.com/Alexey-T/CudaText/blob/6a8b9a974c5d5029c6c273bde83198c83b3a5fb9/app/py/sys/urllib3/poolmanager.py#L522-L532
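For context, a sketch of the documented urllib3 entry point this method sits behind; the proxy address and target URL are placeholders and assume a reachable HTTP proxy.

    import urllib3

    proxy = urllib3.ProxyManager("http://localhost:3128/")  # placeholder proxy
    # Plain-HTTP requests go to the proxy with absolute URLs rather than a
    # CONNECT tunnel, which is why urlopen above sets 'Host' itself.
    resp = proxy.request("GET", "http://httpbin.org/ip")
    print(resp.status)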
dask/dask
c2b962fec1ba45440fe928869dc64cfe9cc36506
dask/dataframe/io/io.py
python
from_bcolz
(x, chunksize=None, categorize=True, index=None, lock=lock, **kwargs)
Read BColz CTable into a Dask Dataframe BColz is a fast on-disk compressed column store with careful attention given to compression. https://bcolz.readthedocs.io/en/latest/ Parameters ---------- x : bcolz.ctable chunksize : int, optional The size(rows) of blocks to pull out from ctable. categorize : bool, defaults to True Automatically categorize all string dtypes index : string, optional Column to make the index lock: bool or Lock Lock to use when reading or False for no lock (not-thread-safe) See Also -------- from_array: more generic function not optimized for bcolz
Read BColz CTable into a Dask Dataframe
[ "Read", "BColz", "CTable", "into", "a", "Dask", "Dataframe" ]
def from_bcolz(x, chunksize=None, categorize=True, index=None, lock=lock, **kwargs): """Read BColz CTable into a Dask Dataframe BColz is a fast on-disk compressed column store with careful attention given to compression. https://bcolz.readthedocs.io/en/latest/ Parameters ---------- x : bcolz.ctable chunksize : int, optional The size(rows) of blocks to pull out from ctable. categorize : bool, defaults to True Automatically categorize all string dtypes index : string, optional Column to make the index lock: bool or Lock Lock to use when reading or False for no lock (not-thread-safe) See Also -------- from_array: more generic function not optimized for bcolz """ if lock is True: lock = Lock() import bcolz import dask.array as da if isinstance(x, str): x = bcolz.ctable(rootdir=x) bc_chunklen = max(x[name].chunklen for name in x.names) if chunksize is None and bc_chunklen > 10000: chunksize = bc_chunklen categories = dict() if categorize: for name in x.names: if ( np.issubdtype(x.dtype[name], np.string_) or np.issubdtype(x.dtype[name], np.unicode_) or np.issubdtype(x.dtype[name], np.object_) ): a = da.from_array(x[name], chunks=(chunksize * len(x.names),)) categories[name] = da.unique(a).compute() columns = tuple(x.dtype.names) divisions = tuple(range(0, len(x), chunksize)) divisions = divisions + (len(x) - 1,) if x.rootdir: token = tokenize( (x.rootdir, os.path.getmtime(x.rootdir)), chunksize, categorize, index, kwargs, ) else: token = tokenize( (id(x), x.shape, x.dtype), chunksize, categorize, index, kwargs ) new_name = "from_bcolz-" + token dsk = { (new_name, i): ( dataframe_from_ctable, x, (slice(i * chunksize, (i + 1) * chunksize),), columns, categories, lock, ) for i in range(0, int(ceil(len(x) / chunksize))) } meta = dataframe_from_ctable(x, slice(0, 0), columns, categories, lock) result = DataFrame(dsk, new_name, meta, divisions) if index: assert index in x.names a = da.from_array(x[index], chunks=(chunksize * len(x.names),)) q = np.linspace(0, 100, len(x) // chunksize + 2) divisions = tuple(da.percentile(a, q).compute()) return set_partition(result, index, divisions, **kwargs) else: return result
[ "def", "from_bcolz", "(", "x", ",", "chunksize", "=", "None", ",", "categorize", "=", "True", ",", "index", "=", "None", ",", "lock", "=", "lock", ",", "*", "*", "kwargs", ")", ":", "if", "lock", "is", "True", ":", "lock", "=", "Lock", "(", ")", "import", "bcolz", "import", "dask", ".", "array", "as", "da", "if", "isinstance", "(", "x", ",", "str", ")", ":", "x", "=", "bcolz", ".", "ctable", "(", "rootdir", "=", "x", ")", "bc_chunklen", "=", "max", "(", "x", "[", "name", "]", ".", "chunklen", "for", "name", "in", "x", ".", "names", ")", "if", "chunksize", "is", "None", "and", "bc_chunklen", ">", "10000", ":", "chunksize", "=", "bc_chunklen", "categories", "=", "dict", "(", ")", "if", "categorize", ":", "for", "name", "in", "x", ".", "names", ":", "if", "(", "np", ".", "issubdtype", "(", "x", ".", "dtype", "[", "name", "]", ",", "np", ".", "string_", ")", "or", "np", ".", "issubdtype", "(", "x", ".", "dtype", "[", "name", "]", ",", "np", ".", "unicode_", ")", "or", "np", ".", "issubdtype", "(", "x", ".", "dtype", "[", "name", "]", ",", "np", ".", "object_", ")", ")", ":", "a", "=", "da", ".", "from_array", "(", "x", "[", "name", "]", ",", "chunks", "=", "(", "chunksize", "*", "len", "(", "x", ".", "names", ")", ",", ")", ")", "categories", "[", "name", "]", "=", "da", ".", "unique", "(", "a", ")", ".", "compute", "(", ")", "columns", "=", "tuple", "(", "x", ".", "dtype", ".", "names", ")", "divisions", "=", "tuple", "(", "range", "(", "0", ",", "len", "(", "x", ")", ",", "chunksize", ")", ")", "divisions", "=", "divisions", "+", "(", "len", "(", "x", ")", "-", "1", ",", ")", "if", "x", ".", "rootdir", ":", "token", "=", "tokenize", "(", "(", "x", ".", "rootdir", ",", "os", ".", "path", ".", "getmtime", "(", "x", ".", "rootdir", ")", ")", ",", "chunksize", ",", "categorize", ",", "index", ",", "kwargs", ",", ")", "else", ":", "token", "=", "tokenize", "(", "(", "id", "(", "x", ")", ",", "x", ".", "shape", ",", "x", ".", "dtype", ")", ",", "chunksize", ",", "categorize", ",", "index", ",", "kwargs", ")", "new_name", "=", "\"from_bcolz-\"", "+", "token", "dsk", "=", "{", "(", "new_name", ",", "i", ")", ":", "(", "dataframe_from_ctable", ",", "x", ",", "(", "slice", "(", "i", "*", "chunksize", ",", "(", "i", "+", "1", ")", "*", "chunksize", ")", ",", ")", ",", "columns", ",", "categories", ",", "lock", ",", ")", "for", "i", "in", "range", "(", "0", ",", "int", "(", "ceil", "(", "len", "(", "x", ")", "/", "chunksize", ")", ")", ")", "}", "meta", "=", "dataframe_from_ctable", "(", "x", ",", "slice", "(", "0", ",", "0", ")", ",", "columns", ",", "categories", ",", "lock", ")", "result", "=", "DataFrame", "(", "dsk", ",", "new_name", ",", "meta", ",", "divisions", ")", "if", "index", ":", "assert", "index", "in", "x", ".", "names", "a", "=", "da", ".", "from_array", "(", "x", "[", "index", "]", ",", "chunks", "=", "(", "chunksize", "*", "len", "(", "x", ".", "names", ")", ",", ")", ")", "q", "=", "np", ".", "linspace", "(", "0", ",", "100", ",", "len", "(", "x", ")", "//", "chunksize", "+", "2", ")", "divisions", "=", "tuple", "(", "da", ".", "percentile", "(", "a", ",", "q", ")", ".", "compute", "(", ")", ")", "return", "set_partition", "(", "result", ",", "index", ",", "divisions", ",", "*", "*", "kwargs", ")", "else", ":", "return", "result" ]
https://github.com/dask/dask/blob/c2b962fec1ba45440fe928869dc64cfe9cc36506/dask/dataframe/io/io.py#L235-L320
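A hedged usage sketch for from_bcolz. It assumes bcolz is installed, that ctable accepts a list of columns plus names, and that dask still exposes dask.dataframe.from_bcolz, which held for older releases (the function has since been removed from dask).

    import bcolz
    import numpy as np
    import dask.dataframe as dd

    # A tiny in-memory ctable: one numeric column, one string column.
    ct = bcolz.ctable([np.arange(100), np.repeat(['a', 'b'], 50)],
                      names=['x', 'cat'])
    df = dd.from_bcolz(ct, chunksize=25, categorize=True)  # 'cat' becomes categorical
    print(df.head())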
1040003585/WebScrapingWithPython
a770fa5b03894076c8c9539b1ffff34424ffc016
portia_examle/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.py
python
HTMLUnicodeInputStream.char
(self)
return char
Read one character from the stream or queue if available. Return EOF when EOF is reached.
Read one character from the stream or queue if available. Return EOF when EOF is reached.
[ "Read", "one", "character", "from", "the", "stream", "or", "queue", "if", "available", ".", "Return", "EOF", "when", "EOF", "is", "reached", "." ]
def char(self): """ Read one character from the stream or queue if available. Return EOF when EOF is reached. """ # Read a new chunk from the input stream if necessary if self.chunkOffset >= self.chunkSize: if not self.readChunk(): return EOF chunkOffset = self.chunkOffset char = self.chunk[chunkOffset] self.chunkOffset = chunkOffset + 1 return char
[ "def", "char", "(", "self", ")", ":", "# Read a new chunk from the input stream if necessary", "if", "self", ".", "chunkOffset", ">=", "self", ".", "chunkSize", ":", "if", "not", "self", ".", "readChunk", "(", ")", ":", "return", "EOF", "chunkOffset", "=", "self", ".", "chunkOffset", "char", "=", "self", ".", "chunk", "[", "chunkOffset", "]", "self", ".", "chunkOffset", "=", "chunkOffset", "+", "1", "return", "char" ]
https://github.com/1040003585/WebScrapingWithPython/blob/a770fa5b03894076c8c9539b1ffff34424ffc016/portia_examle/lib/python2.7/site-packages/pip/_vendor/html5lib/_inputstream.py#L240-L253
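The refill-then-index pattern in char() above generalizes; here is a minimal standalone sketch (a hypothetical class, not the html5lib implementation) with the same structure.

    import io

    class ChunkedReader:
        EOF = None

        def __init__(self, stream, chunk_size=10240):
            self.stream = stream
            self.chunk_size = chunk_size
            self.chunk = ""
            self.offset = 0

        def char(self):
            # Refill the buffer when exhausted, mirroring readChunk() above.
            if self.offset >= len(self.chunk):
                self.chunk = self.stream.read(self.chunk_size)
                self.offset = 0
                if not self.chunk:
                    return self.EOF
            c = self.chunk[self.offset]
            self.offset += 1
            return c

    r = ChunkedReader(io.StringIO("ab"), chunk_size=1)
    print(r.char(), r.char(), r.char())  # a b None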
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/app_manager/suite_xml/features/mobile_ucr.py
python
ReportModuleSuiteHelper.__init__
(self, report_module)
[]
def __init__(self, report_module): assert isinstance(report_module, ReportModule) self.report_module = report_module self.domain = self.app.domain self._loaded = None
[ "def", "__init__", "(", "self", ",", "report_module", ")", ":", "assert", "isinstance", "(", "report_module", ",", "ReportModule", ")", "self", ".", "report_module", "=", "report_module", "self", ".", "domain", "=", "self", ".", "app", ".", "domain", "self", ".", "_loaded", "=", "None" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/app_manager/suite_xml/features/mobile_ucr.py#L77-L81
jliljebl/flowblade
995313a509b80e99eb1ad550d945bdda5995093b
flowblade-trunk/Flowblade/tools/titler.py
python
Titler._keep_layers_toggled
(self, widget)
[]
def _keep_layers_toggled(self, widget): global _keep_titler_data _keep_titler_data = widget.get_active()
[ "def", "_keep_layers_toggled", "(", "self", ",", "widget", ")", ":", "global", "_keep_titler_data", "_keep_titler_data", "=", "widget", ".", "get_active", "(", ")" ]
https://github.com/jliljebl/flowblade/blob/995313a509b80e99eb1ad550d945bdda5995093b/flowblade-trunk/Flowblade/tools/titler.py#L762-L764
learningequality/kolibri
d056dbc477aaf651ab843caa141a6a1e0a491046
kolibri/core/content/api.py
python
RemoteChannelViewSet._studio_response_to_kolibri_response
(cls, studioresp)
return resp
This modifies the JSON response returned by Kolibri Studio, and then transforms its keys that are more in line with the keys we return with /api/channels.
This modifies the JSON response returned by Kolibri Studio, and then transforms its keys that are more in line with the keys we return with /api/channels.
[ "This", "modifies", "the", "JSON", "response", "returned", "by", "Kolibri", "Studio", "and", "then", "transforms", "its", "keys", "that", "are", "more", "in", "line", "with", "the", "keys", "we", "return", "with", "/", "api", "/", "channels", "." ]
def _studio_response_to_kolibri_response(cls, studioresp): """ This modifies the JSON response returned by Kolibri Studio, and then transforms its keys that are more in line with the keys we return with /api/channels. """ # See the spec at: # https://docs.google.com/document/d/1FGR4XBEu7IbfoaEy-8xbhQx2PvIyxp0VugoPrMfo4R4/edit# # Go through the channel's included_languages and add in the native name # for each language included_languages = {} for code in studioresp.get("included_languages", []): included_languages[code] = cls._get_lang_native_name(code) channel_lang_name = cls._get_lang_native_name(studioresp.get("language")) resp = { "id": studioresp["id"], "description": studioresp.get("description"), "tagline": studioresp.get("tagline", None), "name": studioresp["name"], "lang_code": studioresp.get("language"), "lang_name": channel_lang_name, "thumbnail": studioresp.get("icon_encoding"), "public": studioresp.get("public", True), "total_resources": studioresp.get("total_resource_count", 0), "total_file_size": studioresp.get("published_size"), "version": studioresp.get("version", 0), "included_languages": included_languages, "last_updated": studioresp.get("last_published"), "version_notes": studioresp.get("version_notes"), } return resp
[ "def", "_studio_response_to_kolibri_response", "(", "cls", ",", "studioresp", ")", ":", "# See the spec at:", "# https://docs.google.com/document/d/1FGR4XBEu7IbfoaEy-8xbhQx2PvIyxp0VugoPrMfo4R4/edit#", "# Go through the channel's included_languages and add in the native name", "# for each language", "included_languages", "=", "{", "}", "for", "code", "in", "studioresp", ".", "get", "(", "\"included_languages\"", ",", "[", "]", ")", ":", "included_languages", "[", "code", "]", "=", "cls", ".", "_get_lang_native_name", "(", "code", ")", "channel_lang_name", "=", "cls", ".", "_get_lang_native_name", "(", "studioresp", ".", "get", "(", "\"language\"", ")", ")", "resp", "=", "{", "\"id\"", ":", "studioresp", "[", "\"id\"", "]", ",", "\"description\"", ":", "studioresp", ".", "get", "(", "\"description\"", ")", ",", "\"tagline\"", ":", "studioresp", ".", "get", "(", "\"tagline\"", ",", "None", ")", ",", "\"name\"", ":", "studioresp", "[", "\"name\"", "]", ",", "\"lang_code\"", ":", "studioresp", ".", "get", "(", "\"language\"", ")", ",", "\"lang_name\"", ":", "channel_lang_name", ",", "\"thumbnail\"", ":", "studioresp", ".", "get", "(", "\"icon_encoding\"", ")", ",", "\"public\"", ":", "studioresp", ".", "get", "(", "\"public\"", ",", "True", ")", ",", "\"total_resources\"", ":", "studioresp", ".", "get", "(", "\"total_resource_count\"", ",", "0", ")", ",", "\"total_file_size\"", ":", "studioresp", ".", "get", "(", "\"published_size\"", ")", ",", "\"version\"", ":", "studioresp", ".", "get", "(", "\"version\"", ",", "0", ")", ",", "\"included_languages\"", ":", "included_languages", ",", "\"last_updated\"", ":", "studioresp", ".", "get", "(", "\"last_published\"", ")", ",", "\"version_notes\"", ":", "studioresp", ".", "get", "(", "\"version_notes\"", ")", ",", "}", "return", "resp" ]
https://github.com/learningequality/kolibri/blob/d056dbc477aaf651ab843caa141a6a1e0a491046/kolibri/core/content/api.py#L1488-L1523
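Because the method is a pure key transformation, a stripped-down restatement makes the mapping easy to test; the sketch below covers a subset of the fields and stubs the native-language lookup, so it is illustrative rather than the Kolibri implementation.

    # Subset of the Studio -> Kolibri key mapping shown above.
    def studio_to_kolibri(studio, native_name=lambda code: code):
        return {
            "id": studio["id"],
            "name": studio["name"],
            "lang_code": studio.get("language"),
            "lang_name": native_name(studio.get("language")),
            "public": studio.get("public", True),
            "total_resources": studio.get("total_resource_count", 0),
            "version": studio.get("version", 0),
        }

    print(studio_to_kolibri({"id": "abc123", "name": "Science", "language": "en",
                             "total_resource_count": 42}))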
nicodv/kmodes
db8616692b617659e350dc8e0cd52de376a7ce07
kmodes/kmodes.py
python
k_modes
(X, n_clusters, max_iter, dissim, init, n_init, verbose, random_state, n_jobs)
return all_centroids[best], enc_map, all_labels[best], \ all_costs[best], all_n_iters[best], all_epoch_costs[best]
k-modes algorithm
k-modes algorithm
[ "k", "-", "modes", "algorithm" ]
def k_modes(X, n_clusters, max_iter, dissim, init, n_init, verbose, random_state, n_jobs): """k-modes algorithm""" random_state = check_random_state(random_state) if sparse.issparse(X): raise TypeError("k-modes does not support sparse data.") X = check_array(X, dtype=None) # Convert the categorical values in X to integers for speed. # Based on the unique values in X, we can make a mapping to achieve this. X, enc_map = encode_features(X) n_points, n_attrs = X.shape assert n_clusters <= n_points, "Cannot have more clusters ({}) " \ "than data points ({}).".format(n_clusters, n_points) # Are there more n_clusters than unique rows? Then set the unique # rows as initial values and skip iteration. unique = get_unique_rows(X) n_unique = unique.shape[0] if n_unique <= n_clusters: max_iter = 0 n_init = 1 n_clusters = n_unique init = unique results = [] seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) if n_jobs == 1: for init_no in range(n_init): results.append(_k_modes_single(X, n_clusters, n_points, n_attrs, max_iter, dissim, init, init_no, verbose, seeds[init_no])) else: results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(_k_modes_single)(X, n_clusters, n_points, n_attrs, max_iter, dissim, init, init_no, verbose, seed) for init_no, seed in enumerate(seeds)) all_centroids, all_labels, all_costs, all_n_iters, all_epoch_costs = zip(*results) best = np.argmin(all_costs) if n_init > 1 and verbose: print("Best run was number {}".format(best + 1)) return all_centroids[best], enc_map, all_labels[best], \ all_costs[best], all_n_iters[best], all_epoch_costs[best]
[ "def", "k_modes", "(", "X", ",", "n_clusters", ",", "max_iter", ",", "dissim", ",", "init", ",", "n_init", ",", "verbose", ",", "random_state", ",", "n_jobs", ")", ":", "random_state", "=", "check_random_state", "(", "random_state", ")", "if", "sparse", ".", "issparse", "(", "X", ")", ":", "raise", "TypeError", "(", "\"k-modes does not support sparse data.\"", ")", "X", "=", "check_array", "(", "X", ",", "dtype", "=", "None", ")", "# Convert the categorical values in X to integers for speed.", "# Based on the unique values in X, we can make a mapping to achieve this.", "X", ",", "enc_map", "=", "encode_features", "(", "X", ")", "n_points", ",", "n_attrs", "=", "X", ".", "shape", "assert", "n_clusters", "<=", "n_points", ",", "\"Cannot have more clusters ({}) \"", "\"than data points ({}).\"", ".", "format", "(", "n_clusters", ",", "n_points", ")", "# Are there more n_clusters than unique rows? Then set the unique", "# rows as initial values and skip iteration.", "unique", "=", "get_unique_rows", "(", "X", ")", "n_unique", "=", "unique", ".", "shape", "[", "0", "]", "if", "n_unique", "<=", "n_clusters", ":", "max_iter", "=", "0", "n_init", "=", "1", "n_clusters", "=", "n_unique", "init", "=", "unique", "results", "=", "[", "]", "seeds", "=", "random_state", ".", "randint", "(", "np", ".", "iinfo", "(", "np", ".", "int32", ")", ".", "max", ",", "size", "=", "n_init", ")", "if", "n_jobs", "==", "1", ":", "for", "init_no", "in", "range", "(", "n_init", ")", ":", "results", ".", "append", "(", "_k_modes_single", "(", "X", ",", "n_clusters", ",", "n_points", ",", "n_attrs", ",", "max_iter", ",", "dissim", ",", "init", ",", "init_no", ",", "verbose", ",", "seeds", "[", "init_no", "]", ")", ")", "else", ":", "results", "=", "Parallel", "(", "n_jobs", "=", "n_jobs", ",", "verbose", "=", "0", ")", "(", "delayed", "(", "_k_modes_single", ")", "(", "X", ",", "n_clusters", ",", "n_points", ",", "n_attrs", ",", "max_iter", ",", "dissim", ",", "init", ",", "init_no", ",", "verbose", ",", "seed", ")", "for", "init_no", ",", "seed", "in", "enumerate", "(", "seeds", ")", ")", "all_centroids", ",", "all_labels", ",", "all_costs", ",", "all_n_iters", ",", "all_epoch_costs", "=", "zip", "(", "*", "results", ")", "best", "=", "np", ".", "argmin", "(", "all_costs", ")", "if", "n_init", ">", "1", "and", "verbose", ":", "print", "(", "\"Best run was number {}\"", ".", "format", "(", "best", "+", "1", ")", ")", "return", "all_centroids", "[", "best", "]", ",", "enc_map", ",", "all_labels", "[", "best", "]", ",", "all_costs", "[", "best", "]", ",", "all_n_iters", "[", "best", "]", ",", "all_epoch_costs", "[", "best", "]" ]
https://github.com/nicodv/kmodes/blob/db8616692b617659e350dc8e0cd52de376a7ce07/kmodes/kmodes.py#L202-L246
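k_modes is normally reached through the package's public KModes estimator; a short sketch of that documented interface follows (the random categorical data is illustrative).

    import numpy as np
    from kmodes.kmodes import KModes

    # 100 rows of purely categorical data encoded as small integers.
    X = np.random.randint(0, 4, size=(100, 5))
    km = KModes(n_clusters=3, init='Huang', n_init=5, verbose=0)
    labels = km.fit_predict(X)
    print(km.cluster_centroids_)  # the learned modes, one row per cluster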
bytefish/facerec
4071e1e79a50dbf1d1f2e061d24448576e5ac37d
py/facerec/validation.py
python
ValidationStrategy.print_results
(self)
[]
def print_results(self): print(self.model) for validation_result in self.validation_results: print(validation_result)
[ "def", "print_results", "(", "self", ")", ":", "print", "(", "self", ".", "model", ")", "for", "validation_result", "in", "self", ".", "validation_results", ":", "print", "(", "validation_result", ")" ]
https://github.com/bytefish/facerec/blob/4071e1e79a50dbf1d1f2e061d24448576e5ac37d/py/facerec/validation.py#L135-L138
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/F5/Integrations/F5_ASM/F5_ASM.py
python
f5_update_policy_file_type_command
(client: Client, policy_md5: str, file_type_id: str, file_type_name: str, query_string_length: int, check_post_data_length: bool, response_check: bool, check_request_length: bool, post_data_length: int, perform_staging: bool)
return command_results
Update a given file type from a certain policy. Args: client (Client): f5 client. policy_md5 (str): MD5 hash of the policy. file_type_id (str): ID of the file type. file_type_name (str): The new file type to add. query_string_length (int): Query string length. default is 100. check_post_data_length (bool): indicates if the user wishes check the length of data in post method. default is True. response_check (bool): Indicates if the user wishes to check the response. check_request_length (bool): Indicates if the user wishes to check the request length. post_data_length (int): post data length. perform_staging (bool): Indicates if the user wishes the new file type to be at staging.
Update a given file type from a certain policy.
[ "Update", "a", "given", "file", "type", "from", "a", "certain", "policy", "." ]
def f5_update_policy_file_type_command(client: Client, policy_md5: str, file_type_id: str, file_type_name: str, query_string_length: int, check_post_data_length: bool, response_check: bool, check_request_length: bool, post_data_length: int, perform_staging: bool) -> CommandResults: """ Update a given file type from a certain policy. Args: client (Client): f5 client. policy_md5 (str): MD5 hash of the policy. file_type_id (str): ID of the file type. file_type_name (str): The new file type to add. query_string_length (int): Query string length. default is 100. check_post_data_length (bool): indicates if the user wishes check the length of data in post method. default is True. response_check (bool): Indicates if the user wishes to check the response. check_request_length (bool): Indicates if the user wishes to check the request length. post_data_length (int): post data length. perform_staging (bool): Indicates if the user wishes the new file type to be at staging. """ result = client.update_policy_file_type(policy_md5, file_type_id, file_type_name, query_string_length, check_post_data_length, response_check, check_request_length, post_data_length, perform_staging) outputs, headers = build_output(OBJECT_FIELDS, result) readable_output = tableToMarkdown('f5 data for updating policy methods:', outputs, headers, removeNull=True) command_results = CommandResults( outputs_prefix='f5.FileType', outputs_key_field='id', readable_output=readable_output, outputs=remove_empty_elements(outputs), raw_response=result ) return command_results
[ "def", "f5_update_policy_file_type_command", "(", "client", ":", "Client", ",", "policy_md5", ":", "str", ",", "file_type_id", ":", "str", ",", "file_type_name", ":", "str", ",", "query_string_length", ":", "int", ",", "check_post_data_length", ":", "bool", ",", "response_check", ":", "bool", ",", "check_request_length", ":", "bool", ",", "post_data_length", ":", "int", ",", "perform_staging", ":", "bool", ")", "->", "CommandResults", ":", "result", "=", "client", ".", "update_policy_file_type", "(", "policy_md5", ",", "file_type_id", ",", "file_type_name", ",", "query_string_length", ",", "check_post_data_length", ",", "response_check", ",", "check_request_length", ",", "post_data_length", ",", "perform_staging", ")", "outputs", ",", "headers", "=", "build_output", "(", "OBJECT_FIELDS", ",", "result", ")", "readable_output", "=", "tableToMarkdown", "(", "'f5 data for updating policy methods:'", ",", "outputs", ",", "headers", ",", "removeNull", "=", "True", ")", "command_results", "=", "CommandResults", "(", "outputs_prefix", "=", "'f5.FileType'", ",", "outputs_key_field", "=", "'id'", ",", "readable_output", "=", "readable_output", ",", "outputs", "=", "remove_empty_elements", "(", "outputs", ")", ",", "raw_response", "=", "result", ")", "return", "command_results" ]
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/F5/Integrations/F5_ASM/F5_ASM.py#L985-L1021
Azure/azure-devops-cli-extension
11334cd55806bef0b99c3bee5a438eed71e44037
azure-devops/azext_devops/devops_sdk/v5_1/build/build_client.py
python
BuildClient.get_definitions
(self, project, name=None, repository_id=None, repository_type=None, query_order=None, top=None, continuation_token=None, min_metrics_time=None, definition_ids=None, path=None, built_after=None, not_built_after=None, include_all_properties=None, include_latest_builds=None, task_id_filter=None, process_type=None, yaml_filename=None)
return self.GetDefinitionsResponseValue(response_value, continuation_token)
GetDefinitions. Gets a list of definitions. :param str project: Project ID or project name :param str name: If specified, filters to definitions whose names match this pattern. :param str repository_id: A repository ID. If specified, filters to definitions that use this repository. :param str repository_type: If specified, filters to definitions that have a repository of this type. :param str query_order: Indicates the order in which definitions should be returned. :param int top: The maximum number of definitions to return. :param str continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of definitions. :param datetime min_metrics_time: If specified, indicates the date from which metrics should be included. :param [int] definition_ids: A comma-delimited list that specifies the IDs of definitions to retrieve. :param str path: If specified, filters to definitions under this folder. :param datetime built_after: If specified, filters to definitions that have builds after this date. :param datetime not_built_after: If specified, filters to definitions that do not have builds after this date. :param bool include_all_properties: Indicates whether the full definitions should be returned. By default, shallow representations of the definitions are returned. :param bool include_latest_builds: Indicates whether to return the latest and latest completed builds for this definition. :param str task_id_filter: If specified, filters to definitions that use the specified task. :param int process_type: If specified, filters to definitions with the given process type. :param str yaml_filename: If specified, filters to YAML definitions that match the given filename. :rtype: :class:`<GetDefinitionsResponseValue>`
GetDefinitions. Gets a list of definitions. :param str project: Project ID or project name :param str name: If specified, filters to definitions whose names match this pattern. :param str repository_id: A repository ID. If specified, filters to definitions that use this repository. :param str repository_type: If specified, filters to definitions that have a repository of this type. :param str query_order: Indicates the order in which definitions should be returned. :param int top: The maximum number of definitions to return. :param str continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of definitions. :param datetime min_metrics_time: If specified, indicates the date from which metrics should be included. :param [int] definition_ids: A comma-delimited list that specifies the IDs of definitions to retrieve. :param str path: If specified, filters to definitions under this folder. :param datetime built_after: If specified, filters to definitions that have builds after this date. :param datetime not_built_after: If specified, filters to definitions that do not have builds after this date. :param bool include_all_properties: Indicates whether the full definitions should be returned. By default, shallow representations of the definitions are returned. :param bool include_latest_builds: Indicates whether to return the latest and latest completed builds for this definition. :param str task_id_filter: If specified, filters to definitions that use the specified task. :param int process_type: If specified, filters to definitions with the given process type. :param str yaml_filename: If specified, filters to YAML definitions that match the given filename. :rtype: :class:`<GetDefinitionsResponseValue>`
[ "GetDefinitions", ".", "Gets", "a", "list", "of", "definitions", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "str", "name", ":", "If", "specified", "filters", "to", "definitions", "whose", "names", "match", "this", "pattern", ".", ":", "param", "str", "repository_id", ":", "A", "repository", "ID", ".", "If", "specified", "filters", "to", "definitions", "that", "use", "this", "repository", ".", ":", "param", "str", "repository_type", ":", "If", "specified", "filters", "to", "definitions", "that", "have", "a", "repository", "of", "this", "type", ".", ":", "param", "str", "query_order", ":", "Indicates", "the", "order", "in", "which", "definitions", "should", "be", "returned", ".", ":", "param", "int", "top", ":", "The", "maximum", "number", "of", "definitions", "to", "return", ".", ":", "param", "str", "continuation_token", ":", "A", "continuation", "token", "returned", "by", "a", "previous", "call", "to", "this", "method", "that", "can", "be", "used", "to", "return", "the", "next", "set", "of", "definitions", ".", ":", "param", "datetime", "min_metrics_time", ":", "If", "specified", "indicates", "the", "date", "from", "which", "metrics", "should", "be", "included", ".", ":", "param", "[", "int", "]", "definition_ids", ":", "A", "comma", "-", "delimited", "list", "that", "specifies", "the", "IDs", "of", "definitions", "to", "retrieve", ".", ":", "param", "str", "path", ":", "If", "specified", "filters", "to", "definitions", "under", "this", "folder", ".", ":", "param", "datetime", "built_after", ":", "If", "specified", "filters", "to", "definitions", "that", "have", "builds", "after", "this", "date", ".", ":", "param", "datetime", "not_built_after", ":", "If", "specified", "filters", "to", "definitions", "that", "do", "not", "have", "builds", "after", "this", "date", ".", ":", "param", "bool", "include_all_properties", ":", "Indicates", "whether", "the", "full", "definitions", "should", "be", "returned", ".", "By", "default", "shallow", "representations", "of", "the", "definitions", "are", "returned", ".", ":", "param", "bool", "include_latest_builds", ":", "Indicates", "whether", "to", "return", "the", "latest", "and", "latest", "completed", "builds", "for", "this", "definition", ".", ":", "param", "str", "task_id_filter", ":", "If", "specified", "filters", "to", "definitions", "that", "use", "the", "specified", "task", ".", ":", "param", "int", "process_type", ":", "If", "specified", "filters", "to", "definitions", "with", "the", "given", "process", "type", ".", ":", "param", "str", "yaml_filename", ":", "If", "specified", "filters", "to", "YAML", "definitions", "that", "match", "the", "given", "filename", ".", ":", "rtype", ":", ":", "class", ":", "<GetDefinitionsResponseValue", ">" ]
def get_definitions(self, project, name=None, repository_id=None, repository_type=None, query_order=None, top=None, continuation_token=None, min_metrics_time=None, definition_ids=None, path=None, built_after=None, not_built_after=None, include_all_properties=None, include_latest_builds=None, task_id_filter=None, process_type=None, yaml_filename=None): """GetDefinitions. Gets a list of definitions. :param str project: Project ID or project name :param str name: If specified, filters to definitions whose names match this pattern. :param str repository_id: A repository ID. If specified, filters to definitions that use this repository. :param str repository_type: If specified, filters to definitions that have a repository of this type. :param str query_order: Indicates the order in which definitions should be returned. :param int top: The maximum number of definitions to return. :param str continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of definitions. :param datetime min_metrics_time: If specified, indicates the date from which metrics should be included. :param [int] definition_ids: A comma-delimited list that specifies the IDs of definitions to retrieve. :param str path: If specified, filters to definitions under this folder. :param datetime built_after: If specified, filters to definitions that have builds after this date. :param datetime not_built_after: If specified, filters to definitions that do not have builds after this date. :param bool include_all_properties: Indicates whether the full definitions should be returned. By default, shallow representations of the definitions are returned. :param bool include_latest_builds: Indicates whether to return the latest and latest completed builds for this definition. :param str task_id_filter: If specified, filters to definitions that use the specified task. :param int process_type: If specified, filters to definitions with the given process type. :param str yaml_filename: If specified, filters to YAML definitions that match the given filename. :rtype: :class:`<GetDefinitionsResponseValue>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if name is not None: query_parameters['name'] = self._serialize.query('name', name, 'str') if repository_id is not None: query_parameters['repositoryId'] = self._serialize.query('repository_id', repository_id, 'str') if repository_type is not None: query_parameters['repositoryType'] = self._serialize.query('repository_type', repository_type, 'str') if query_order is not None: query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') if min_metrics_time is not None: query_parameters['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601') if definition_ids is not None: definition_ids = ",".join(map(str, definition_ids)) query_parameters['definitionIds'] = self._serialize.query('definition_ids', definition_ids, 'str') if path is not None: query_parameters['path'] = self._serialize.query('path', path, 'str') if built_after is not None: query_parameters['builtAfter'] = self._serialize.query('built_after', built_after, 'iso-8601') if not_built_after is not None: query_parameters['notBuiltAfter'] = self._serialize.query('not_built_after', not_built_after, 'iso-8601') if include_all_properties is not None: query_parameters['includeAllProperties'] = self._serialize.query('include_all_properties', include_all_properties, 'bool') if include_latest_builds is not None: query_parameters['includeLatestBuilds'] = self._serialize.query('include_latest_builds', include_latest_builds, 'bool') if task_id_filter is not None: query_parameters['taskIdFilter'] = self._serialize.query('task_id_filter', task_id_filter, 'str') if process_type is not None: query_parameters['processType'] = self._serialize.query('process_type', process_type, 'int') if yaml_filename is not None: query_parameters['yamlFilename'] = self._serialize.query('yaml_filename', yaml_filename, 'str') response = self._send(http_method='GET', location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6', version='5.1', route_values=route_values, query_parameters=query_parameters) response_value = self._deserialize('[BuildDefinitionReference]', self._unwrap_collection(response)) continuation_token = self._get_continuation_token(response) return self.GetDefinitionsResponseValue(response_value, continuation_token)
[ "def", "get_definitions", "(", "self", ",", "project", ",", "name", "=", "None", ",", "repository_id", "=", "None", ",", "repository_type", "=", "None", ",", "query_order", "=", "None", ",", "top", "=", "None", ",", "continuation_token", "=", "None", ",", "min_metrics_time", "=", "None", ",", "definition_ids", "=", "None", ",", "path", "=", "None", ",", "built_after", "=", "None", ",", "not_built_after", "=", "None", ",", "include_all_properties", "=", "None", ",", "include_latest_builds", "=", "None", ",", "task_id_filter", "=", "None", ",", "process_type", "=", "None", ",", "yaml_filename", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "name", "is", "not", "None", ":", "query_parameters", "[", "'name'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'name'", ",", "name", ",", "'str'", ")", "if", "repository_id", "is", "not", "None", ":", "query_parameters", "[", "'repositoryId'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'repository_id'", ",", "repository_id", ",", "'str'", ")", "if", "repository_type", "is", "not", "None", ":", "query_parameters", "[", "'repositoryType'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'repository_type'", ",", "repository_type", ",", "'str'", ")", "if", "query_order", "is", "not", "None", ":", "query_parameters", "[", "'queryOrder'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'query_order'", ",", "query_order", ",", "'str'", ")", "if", "top", "is", "not", "None", ":", "query_parameters", "[", "'$top'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'top'", ",", "top", ",", "'int'", ")", "if", "continuation_token", "is", "not", "None", ":", "query_parameters", "[", "'continuationToken'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'continuation_token'", ",", "continuation_token", ",", "'str'", ")", "if", "min_metrics_time", "is", "not", "None", ":", "query_parameters", "[", "'minMetricsTime'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'min_metrics_time'", ",", "min_metrics_time", ",", "'iso-8601'", ")", "if", "definition_ids", "is", "not", "None", ":", "definition_ids", "=", "\",\"", ".", "join", "(", "map", "(", "str", ",", "definition_ids", ")", ")", "query_parameters", "[", "'definitionIds'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'definition_ids'", ",", "definition_ids", ",", "'str'", ")", "if", "path", "is", "not", "None", ":", "query_parameters", "[", "'path'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'path'", ",", "path", ",", "'str'", ")", "if", "built_after", "is", "not", "None", ":", "query_parameters", "[", "'builtAfter'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'built_after'", ",", "built_after", ",", "'iso-8601'", ")", "if", "not_built_after", "is", "not", "None", ":", "query_parameters", "[", "'notBuiltAfter'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'not_built_after'", ",", "not_built_after", ",", "'iso-8601'", ")", "if", "include_all_properties", "is", "not", "None", ":", "query_parameters", "[", "'includeAllProperties'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'include_all_properties'", ",", "include_all_properties", ",", "'bool'", ")", "if", "include_latest_builds", "is", "not", "None", ":", "query_parameters", "[", "'includeLatestBuilds'", 
"]", "=", "self", ".", "_serialize", ".", "query", "(", "'include_latest_builds'", ",", "include_latest_builds", ",", "'bool'", ")", "if", "task_id_filter", "is", "not", "None", ":", "query_parameters", "[", "'taskIdFilter'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'task_id_filter'", ",", "task_id_filter", ",", "'str'", ")", "if", "process_type", "is", "not", "None", ":", "query_parameters", "[", "'processType'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'process_type'", ",", "process_type", ",", "'int'", ")", "if", "yaml_filename", "is", "not", "None", ":", "query_parameters", "[", "'yamlFilename'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'yaml_filename'", ",", "yaml_filename", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'dbeaf647-6167-421a-bda9-c9327b25e2e6'", ",", "version", "=", "'5.1'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "response_value", "=", "self", ".", "_deserialize", "(", "'[BuildDefinitionReference]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")", "continuation_token", "=", "self", ".", "_get_continuation_token", "(", "response", ")", "return", "self", ".", "GetDefinitionsResponseValue", "(", "response_value", ",", "continuation_token", ")" ]
https://github.com/Azure/azure-devops-cli-extension/blob/11334cd55806bef0b99c3bee5a438eed71e44037/azure-devops/azext_devops/devops_sdk/v5_1/build/build_client.py#L714-L780
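A hedged sketch of reaching get_definitions through the published azure-devops Python client. The organization URL and token are placeholders, and the .value/.continuation_token attribute names follow the response-value convention implied by GetDefinitionsResponseValue above.

    from azure.devops.connection import Connection
    from msrest.authentication import BasicAuthentication

    credentials = BasicAuthentication('', 'PERSONAL_ACCESS_TOKEN')  # placeholder PAT
    connection = Connection(base_url='https://dev.azure.com/myorg', creds=credentials)
    build_client = connection.clients.get_build_client()

    resp = build_client.get_definitions(project='MyProject', top=50)
    for definition in resp.value:
        print(definition.id, definition.name)
    # resp.continuation_token, if set, pages through the remaining definitions.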
pokealarm/pokealarm
2edc3a978b7435a453d1917fbf436891fad1e18f
PokeAlarm/Filters/RaidFilter.py
python
RaidFilter.__init__
(self, mgr, name, data)
Initializes base parameters for a filter.
Initializes base parameters for a filter.
[ "Initializes", "base", "parameters", "for", "a", "filter", "." ]
def __init__(self, mgr, name, data): """ Initializes base parameters for a filter. """ super(RaidFilter, self).__init__(mgr, 'egg', name) # Monster ID - f.mon_ids in r.mon_id self.mon_ids = self.evaluate_attribute( # event_attribute='mon_id', eval_func=operator.contains, limit=BaseFilter.parse_as_set( MonUtils.get_monster_id, 'monsters', data)) # Exclude Monster ID - f.monster_ids not contains r.ex_mon_id self.exclude_mon_ids = self.evaluate_attribute( # event_attribute='mon_id', eval_func=lambda d, v: not operator.contains(d, v), limit=BaseFilter.parse_as_set( MonUtils.get_monster_id, 'monsters_exclude', data)) # Distance self.min_dist = self.evaluate_attribute( # f.min_dist <= r.distance event_attribute='distance', eval_func=operator.le, limit=BaseFilter.parse_as_type(float, 'min_dist', data)) self.max_dist = self.evaluate_attribute( # f.max_dist <= r.distance event_attribute='distance', eval_func=operator.ge, limit=BaseFilter.parse_as_type(float, 'max_dist', data)) # Time Left self.min_time_left = self.evaluate_attribute( # f.min_time_left <= r.time_left event_attribute='time_left', eval_func=operator.le, limit=BaseFilter.parse_as_type(int, 'min_time_left', data)) self.max_time_left = self.evaluate_attribute( # f.max_time_left >= r.time_left event_attribute='time_left', eval_func=operator.ge, limit=BaseFilter.parse_as_type(int, 'max_time_left', data)) # Monster Info self.min_lvl = self.evaluate_attribute( # f.min_lvl <= r.mon_lvl event_attribute='raid_lvl', eval_func=operator.le, limit=BaseFilter.parse_as_type(int, 'min_raid_lvl', data)) self.max_lvl = self.evaluate_attribute( # f.max_lvl >= r.mon_lvl event_attribute='raid_lvl', eval_func=operator.ge, limit=BaseFilter.parse_as_type(int, 'max_raid_lvl', data)) # Monster Forms self.forms = self.evaluate_attribute( # f.forms in r.form_id event_attribute='form_id', eval_func=operator.contains, limit=BaseFilter.parse_as_set(int, 'form_ids', data)) # Exclude Forms - f.forms_ids not contains m.ex_form_id self.exclude_form_ids = self.evaluate_attribute( # event_attribute='form_id', eval_func=lambda d, v: not operator.contains(d, v), limit=BaseFilter.parse_as_set(int, 'exclude_forms', data)) # Monster Costumes self.costumes = self.evaluate_attribute( # f.costumes in m.costume_id event_attribute='costume_id', eval_func=operator.contains, limit=BaseFilter.parse_as_set(int, 'costume_ids', data)) # Exclude Costumes - f.costumes_ids not contains m.ex_costume_id self.exclude_costume_ids = self.evaluate_attribute( # event_attribute='costume_id', eval_func=lambda d, v: not operator.contains(d, v), limit=BaseFilter.parse_as_set(int, 'exclude_costumes', data)) # Gender self.genders = self.evaluate_attribute( # f.genders contains m.gender event_attribute='gender', eval_func=operator.contains, limit=BaseFilter.parse_as_set( MonUtils.get_gender_sym, 'genders', data)) # CP self.min_cp = self.evaluate_attribute( # f.min_cp <= r.cp event_attribute='cp', eval_func=operator.le, limit=BaseFilter.parse_as_type(int, 'min_cp', data)) self.max_cp = self.evaluate_attribute( # f.max_cp >= r.cp event_attribute='cp', eval_func=operator.ge, limit=BaseFilter.parse_as_type(int, 'max_cp', data)) # Quick Move self.quick_moves = self.evaluate_attribute( # f.q_ms contains r.q_m event_attribute='quick_id', eval_func=operator.contains, limit=BaseFilter.parse_as_set( MonUtils.get_move_id, 'quick_moves', data)) # Charge Move self.charge_moves = self.evaluate_attribute( # f.c_ms contains r.c_m event_attribute='charge_id', eval_func=operator.contains, limit=BaseFilter.parse_as_set( MonUtils.get_move_id, 'charge_moves', data)) # Gym name self.gym_name_contains = self.evaluate_attribute( # f.gn matches e.gn event_attribute='gym_name', eval_func=GymUtils.match_regex_dict, limit=BaseFilter.parse_as_set( GymUtils.create_regex, 'gym_name_contains', data)) self.gym_name_excludes = self.evaluate_attribute( # f.gn no-match e.gn event_attribute='gym_name', eval_func=GymUtils.not_match_regex_dict, limit=BaseFilter.parse_as_set( GymUtils.create_regex, 'gym_name_excludes', data)) # Gym sponsor self.sponsored = self.evaluate_attribute( # event_attribute='sponsor_id', eval_func=lambda y, x: (x > 0) == y, limit=BaseFilter.parse_as_type(bool, 'sponsored', data)) # Gym park self.park_contains = self.evaluate_attribute( # f.gp matches e.gp event_attribute='park', eval_func=GymUtils.match_regex_dict, limit=BaseFilter.parse_as_set( GymUtils.create_regex, 'park_contains', data)) self.is_ex_eligible = self.evaluate_attribute( event_attribute='ex_eligible', eval_func=operator.eq, limit=BaseFilter.parse_as_type(bool, 'is_ex_eligible', data) ) # Team Info self.old_team = self.evaluate_attribute( # f.ctis contains m.cti event_attribute='current_team_id', eval_func=operator.contains, limit=BaseFilter.parse_as_set( GymUtils.get_team_id, 'current_teams', data)) # Weather self.weather_ids = self.evaluate_attribute( # f.w_ids contains m.w_id event_attribute='weather_id', eval_func=operator.contains, limit=BaseFilter.parse_as_set(get_weather_id, 'weather', data)) # Geofences self.geofences = BaseFilter.parse_as_list(str, 'geofences', data) # Custom DTS self.custom_dts = BaseFilter.parse_as_dict( str, str, 'custom_dts', data) # Missing Info self.is_missing_info = BaseFilter.parse_as_type( bool, 'is_missing_info', data) # Reject leftover parameters for key in data: raise ValueError("'{}' is not a recognized parameter for" " Raid filters".format(key))
[ "def", "__init__", "(", "self", ",", "mgr", ",", "name", ",", "data", ")", ":", "super", "(", "RaidFilter", ",", "self", ")", ".", "__init__", "(", "mgr", ",", "'egg'", ",", "name", ")", "# Monster ID - f.mon_ids in r.mon_id", "self", ".", "mon_ids", "=", "self", ".", "evaluate_attribute", "(", "#", "event_attribute", "=", "'mon_id'", ",", "eval_func", "=", "operator", ".", "contains", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "MonUtils", ".", "get_monster_id", ",", "'monsters'", ",", "data", ")", ")", "# Exclude Monster ID - f.monster_ids not contains r.ex_mon_id", "self", ".", "exclude_mon_ids", "=", "self", ".", "evaluate_attribute", "(", "#", "event_attribute", "=", "'mon_id'", ",", "eval_func", "=", "lambda", "d", ",", "v", ":", "not", "operator", ".", "contains", "(", "d", ",", "v", ")", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "MonUtils", ".", "get_monster_id", ",", "'monsters_exclude'", ",", "data", ")", ")", "# Distance", "self", ".", "min_dist", "=", "self", ".", "evaluate_attribute", "(", "# f.min_dist <= r.distance", "event_attribute", "=", "'distance'", ",", "eval_func", "=", "operator", ".", "le", ",", "limit", "=", "BaseFilter", ".", "parse_as_type", "(", "float", ",", "'min_dist'", ",", "data", ")", ")", "self", ".", "max_dist", "=", "self", ".", "evaluate_attribute", "(", "# f.max_dist <= r.distance", "event_attribute", "=", "'distance'", ",", "eval_func", "=", "operator", ".", "ge", ",", "limit", "=", "BaseFilter", ".", "parse_as_type", "(", "float", ",", "'max_dist'", ",", "data", ")", ")", "# Time Left", "self", ".", "min_time_left", "=", "self", ".", "evaluate_attribute", "(", "# f.min_time_left <= r.time_left", "event_attribute", "=", "'time_left'", ",", "eval_func", "=", "operator", ".", "le", ",", "limit", "=", "BaseFilter", ".", "parse_as_type", "(", "int", ",", "'min_time_left'", ",", "data", ")", ")", "self", ".", "max_time_left", "=", "self", ".", "evaluate_attribute", "(", "# f.max_time_left >= r.time_left", "event_attribute", "=", "'time_left'", ",", "eval_func", "=", "operator", ".", "ge", ",", "limit", "=", "BaseFilter", ".", "parse_as_type", "(", "int", ",", "'max_time_left'", ",", "data", ")", ")", "# Monster Info", "self", ".", "min_lvl", "=", "self", ".", "evaluate_attribute", "(", "# f.min_lvl <= r.mon_lvl", "event_attribute", "=", "'raid_lvl'", ",", "eval_func", "=", "operator", ".", "le", ",", "limit", "=", "BaseFilter", ".", "parse_as_type", "(", "int", ",", "'min_raid_lvl'", ",", "data", ")", ")", "self", ".", "max_lvl", "=", "self", ".", "evaluate_attribute", "(", "# f.max_lvl >= r.mon_lvl", "event_attribute", "=", "'raid_lvl'", ",", "eval_func", "=", "operator", ".", "ge", ",", "limit", "=", "BaseFilter", ".", "parse_as_type", "(", "int", ",", "'max_raid_lvl'", ",", "data", ")", ")", "# Monster Forms", "self", ".", "forms", "=", "self", ".", "evaluate_attribute", "(", "# f.forms in r.form_id", "event_attribute", "=", "'form_id'", ",", "eval_func", "=", "operator", ".", "contains", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "int", ",", "'form_ids'", ",", "data", ")", ")", "# Exclude Forms - f.forms_ids not contains m.ex_form_id", "self", ".", "exclude_form_ids", "=", "self", ".", "evaluate_attribute", "(", "#", "event_attribute", "=", "'form_id'", ",", "eval_func", "=", "lambda", "d", ",", "v", ":", "not", "operator", ".", "contains", "(", "d", ",", "v", ")", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "int", ",", "'exclude_forms'", ",", "data", ")", ")", "# Monster Costumes", 
"self", ".", "costumes", "=", "self", ".", "evaluate_attribute", "(", "# f.costumes in m.costume_id", "event_attribute", "=", "'costume_id'", ",", "eval_func", "=", "operator", ".", "contains", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "int", ",", "'costume_ids'", ",", "data", ")", ")", "# Exclude Costumes - f.costumes_ids not contains m.ex_costume_id", "self", ".", "exclude_costume_ids", "=", "self", ".", "evaluate_attribute", "(", "#", "event_attribute", "=", "'costume_id'", ",", "eval_func", "=", "lambda", "d", ",", "v", ":", "not", "operator", ".", "contains", "(", "d", ",", "v", ")", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "int", ",", "'exclude_costumes'", ",", "data", ")", ")", "# Gender", "self", ".", "genders", "=", "self", ".", "evaluate_attribute", "(", "# f.genders contains m.gender", "event_attribute", "=", "'gender'", ",", "eval_func", "=", "operator", ".", "contains", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "MonUtils", ".", "get_gender_sym", ",", "'genders'", ",", "data", ")", ")", "# CP", "self", ".", "min_cp", "=", "self", ".", "evaluate_attribute", "(", "# f.min_cp <= r.cp", "event_attribute", "=", "'cp'", ",", "eval_func", "=", "operator", ".", "le", ",", "limit", "=", "BaseFilter", ".", "parse_as_type", "(", "int", ",", "'min_cp'", ",", "data", ")", ")", "self", ".", "max_cp", "=", "self", ".", "evaluate_attribute", "(", "# f.max_cp >= r.cp", "event_attribute", "=", "'cp'", ",", "eval_func", "=", "operator", ".", "ge", ",", "limit", "=", "BaseFilter", ".", "parse_as_type", "(", "int", ",", "'max_cp'", ",", "data", ")", ")", "# Quick Move", "self", ".", "quick_moves", "=", "self", ".", "evaluate_attribute", "(", "# f.q_ms contains r.q_m", "event_attribute", "=", "'quick_id'", ",", "eval_func", "=", "operator", ".", "contains", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "MonUtils", ".", "get_move_id", ",", "'quick_moves'", ",", "data", ")", ")", "# Charge Move", "self", ".", "charge_moves", "=", "self", ".", "evaluate_attribute", "(", "# f.c_ms contains r.c_m", "event_attribute", "=", "'charge_id'", ",", "eval_func", "=", "operator", ".", "contains", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "MonUtils", ".", "get_move_id", ",", "'charge_moves'", ",", "data", ")", ")", "# Gym name", "self", ".", "gym_name_contains", "=", "self", ".", "evaluate_attribute", "(", "# f.gn matches e.gn", "event_attribute", "=", "'gym_name'", ",", "eval_func", "=", "GymUtils", ".", "match_regex_dict", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "GymUtils", ".", "create_regex", ",", "'gym_name_contains'", ",", "data", ")", ")", "self", ".", "gym_name_excludes", "=", "self", ".", "evaluate_attribute", "(", "# f.gn no-match e.gn", "event_attribute", "=", "'gym_name'", ",", "eval_func", "=", "GymUtils", ".", "not_match_regex_dict", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "GymUtils", ".", "create_regex", ",", "'gym_name_excludes'", ",", "data", ")", ")", "# Gym sponsor", "self", ".", "sponsored", "=", "self", ".", "evaluate_attribute", "(", "#", "event_attribute", "=", "'sponsor_id'", ",", "eval_func", "=", "lambda", "y", ",", "x", ":", "(", "x", ">", "0", ")", "==", "y", ",", "limit", "=", "BaseFilter", ".", "parse_as_type", "(", "bool", ",", "'sponsored'", ",", "data", ")", ")", "# Gym park", "self", ".", "park_contains", "=", "self", ".", "evaluate_attribute", "(", "# f.gp matches e.gp", "event_attribute", "=", "'park'", ",", "eval_func", "=", "GymUtils", ".", 
"match_regex_dict", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "GymUtils", ".", "create_regex", ",", "'park_contains'", ",", "data", ")", ")", "self", ".", "is_ex_eligible", "=", "self", ".", "evaluate_attribute", "(", "event_attribute", "=", "'ex_eligible'", ",", "eval_func", "=", "operator", ".", "eq", ",", "limit", "=", "BaseFilter", ".", "parse_as_type", "(", "bool", ",", "'is_ex_eligible'", ",", "data", ")", ")", "# Team Info", "self", ".", "old_team", "=", "self", ".", "evaluate_attribute", "(", "# f.ctis contains m.cti", "event_attribute", "=", "'current_team_id'", ",", "eval_func", "=", "operator", ".", "contains", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "GymUtils", ".", "get_team_id", ",", "'current_teams'", ",", "data", ")", ")", "# Weather", "self", ".", "weather_ids", "=", "self", ".", "evaluate_attribute", "(", "# f.w_ids contains m.w_id", "event_attribute", "=", "'weather_id'", ",", "eval_func", "=", "operator", ".", "contains", ",", "limit", "=", "BaseFilter", ".", "parse_as_set", "(", "get_weather_id", ",", "'weather'", ",", "data", ")", ")", "# Geofences", "self", ".", "geofences", "=", "BaseFilter", ".", "parse_as_list", "(", "str", ",", "'geofences'", ",", "data", ")", "# Custom DTS", "self", ".", "custom_dts", "=", "BaseFilter", ".", "parse_as_dict", "(", "str", ",", "str", ",", "'custom_dts'", ",", "data", ")", "# Missing Info", "self", ".", "is_missing_info", "=", "BaseFilter", ".", "parse_as_type", "(", "bool", ",", "'is_missing_info'", ",", "data", ")", "# Reject leftover parameters", "for", "key", "in", "data", ":", "raise", "ValueError", "(", "\"'{}' is not a recognized parameter for\"", "\" Raid filters\"", ".", "format", "(", "key", ")", ")" ]
https://github.com/pokealarm/pokealarm/blob/2edc3a978b7435a453d1917fbf436891fad1e18f/PokeAlarm/Filters/RaidFilter.py#L14-L156
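A minimal sketch of a filter definition exercising the parameters parsed above; the key names come straight from the parse calls in the constructor, while the values and the surrounding config layout are assumed:

# Hypothetical Raid filter data (the dict handed to this constructor).
# Any key not consumed above would trip the leftover-parameter ValueError.
raid_filter_data = {
    "monsters": ["Mewtwo"],          # resolved via MonUtils.get_monster_id
    "min_raid_lvl": 5,               # raid level bounds
    "max_raid_lvl": 5,
    "gym_name_contains": ["park"],   # compiled by GymUtils.create_regex
    "is_ex_eligible": True,          # boolean gym attribute
    "geofences": ["downtown"],       # plain list of geofence names
}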
lazylibrarian/LazyLibrarian
ae3c14e9db9328ce81765e094ab2a14ed7155624
lib/pythontwitter/__init__.py
python
Status.__init__
(self, created_at=None, favorited=None, favorite_count=None, id=None, text=None, location=None, user=None, in_reply_to_screen_name=None, in_reply_to_user_id=None, in_reply_to_status_id=None, truncated=None, source=None, now=None, urls=None, user_mentions=None, hashtags=None, media=None, geo=None, place=None, coordinates=None, contributors=None, retweeted=None, retweeted_status=None, current_user_retweet=None, retweet_count=None, possibly_sensitive=None, scopes=None, withheld_copyright=None, withheld_in_countries=None, withheld_scope=None)
An object to hold a Twitter status message. This class is normally instantiated by the twitter.Api class and returned in a sequence. Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007" Args: created_at: The time this status message was posted. [Optional] favorited: Whether this is a favorite of the authenticated user. [Optional] favorite_count: Number of times this status message has been favorited. [Optional] id: The unique id of this status message. [Optional] text: The text of this status message. [Optional] location: The geolocation string associated with this message. [Optional] user: A twitter.User instance representing the person posting the message. [Optional] now: The current time, if the client chooses to set it. Defaults to the wall clock time. [Optional] urls: user_mentions: hashtags: geo: place: coordinates: contributors: retweeted: retweeted_status: current_user_retweet: retweet_count: possibly_sensitive: scopes: withheld_copyright: withheld_in_countries: withheld_scope:
An object to hold a Twitter status message.
[ "An", "object", "to", "hold", "a", "Twitter", "status", "message", "." ]
def __init__(self, created_at=None, favorited=None, favorite_count=None, id=None, text=None, location=None, user=None, in_reply_to_screen_name=None, in_reply_to_user_id=None, in_reply_to_status_id=None, truncated=None, source=None, now=None, urls=None, user_mentions=None, hashtags=None, media=None, geo=None, place=None, coordinates=None, contributors=None, retweeted=None, retweeted_status=None, current_user_retweet=None, retweet_count=None, possibly_sensitive=None, scopes=None, withheld_copyright=None, withheld_in_countries=None, withheld_scope=None): '''An object to hold a Twitter status message. This class is normally instantiated by the twitter.Api class and returned in a sequence. Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007" Args: created_at: The time this status message was posted. [Optional] favorited: Whether this is a favorite of the authenticated user. [Optional] favorite_count: Number of times this status message has been favorited. [Optional] id: The unique id of this status message. [Optional] text: The text of this status message. [Optional] location: the geolocation string associated with this message. [Optional] relative_created_at: A human readable string representing the posting time. [Optional] user: A twitter.User instance representing the person posting the message. [Optional] now: The current time, if the client chooses to set it. Defaults to the wall clock time. [Optional] urls: user_mentions: hashtags: geo: place: coordinates: contributors: retweeted: retweeted_status: current_user_retweet: retweet_count: possibly_sensitive: scopes: withheld_copyright: withheld_in_countries: withheld_scope: ''' self.created_at = created_at self.favorited = favorited self.favorite_count = favorite_count self.id = id self.text = text self.location = location self.user = user self.now = now self.in_reply_to_screen_name = in_reply_to_screen_name self.in_reply_to_user_id = in_reply_to_user_id self.in_reply_to_status_id = in_reply_to_status_id self.truncated = truncated self.retweeted = retweeted self.source = source self.urls = urls self.user_mentions = user_mentions self.hashtags = hashtags self.media = media self.geo = geo self.place = place self.coordinates = coordinates self.contributors = contributors self.retweeted_status = retweeted_status self.current_user_retweet = current_user_retweet self.retweet_count = retweet_count self.possibly_sensitive = possibly_sensitive self.scopes = scopes self.withheld_copyright = withheld_copyright self.withheld_in_countries = withheld_in_countries self.withheld_scope = withheld_scope
[ "def", "__init__", "(", "self", ",", "created_at", "=", "None", ",", "favorited", "=", "None", ",", "favorite_count", "=", "None", ",", "id", "=", "None", ",", "text", "=", "None", ",", "location", "=", "None", ",", "user", "=", "None", ",", "in_reply_to_screen_name", "=", "None", ",", "in_reply_to_user_id", "=", "None", ",", "in_reply_to_status_id", "=", "None", ",", "truncated", "=", "None", ",", "source", "=", "None", ",", "now", "=", "None", ",", "urls", "=", "None", ",", "user_mentions", "=", "None", ",", "hashtags", "=", "None", ",", "media", "=", "None", ",", "geo", "=", "None", ",", "place", "=", "None", ",", "coordinates", "=", "None", ",", "contributors", "=", "None", ",", "retweeted", "=", "None", ",", "retweeted_status", "=", "None", ",", "current_user_retweet", "=", "None", ",", "retweet_count", "=", "None", ",", "possibly_sensitive", "=", "None", ",", "scopes", "=", "None", ",", "withheld_copyright", "=", "None", ",", "withheld_in_countries", "=", "None", ",", "withheld_scope", "=", "None", ")", ":", "self", ".", "created_at", "=", "created_at", "self", ".", "favorited", "=", "favorited", "self", ".", "favorite_count", "=", "favorite_count", "self", ".", "id", "=", "id", "self", ".", "text", "=", "text", "self", ".", "location", "=", "location", "self", ".", "user", "=", "user", "self", ".", "now", "=", "now", "self", ".", "in_reply_to_screen_name", "=", "in_reply_to_screen_name", "self", ".", "in_reply_to_user_id", "=", "in_reply_to_user_id", "self", ".", "in_reply_to_status_id", "=", "in_reply_to_status_id", "self", ".", "truncated", "=", "truncated", "self", ".", "retweeted", "=", "retweeted", "self", ".", "source", "=", "source", "self", ".", "urls", "=", "urls", "self", ".", "user_mentions", "=", "user_mentions", "self", ".", "hashtags", "=", "hashtags", "self", ".", "media", "=", "media", "self", ".", "geo", "=", "geo", "self", ".", "place", "=", "place", "self", ".", "coordinates", "=", "coordinates", "self", ".", "contributors", "=", "contributors", "self", ".", "retweeted_status", "=", "retweeted_status", "self", ".", "current_user_retweet", "=", "current_user_retweet", "self", ".", "retweet_count", "=", "retweet_count", "self", ".", "possibly_sensitive", "=", "possibly_sensitive", "self", ".", "scopes", "=", "scopes", "self", ".", "withheld_copyright", "=", "withheld_copyright", "self", ".", "withheld_in_countries", "=", "withheld_in_countries", "self", ".", "withheld_scope", "=", "withheld_scope" ]
https://github.com/lazylibrarian/LazyLibrarian/blob/ae3c14e9db9328ce81765e094ab2a14ed7155624/lib/pythontwitter/__init__.py#L137-L242
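Since every argument is optional and simply stored on the instance, a Status can be built by hand; a short sketch with illustrative field values:

# Unset fields default to None.
status = Status(
    id=4212713,
    text="How are you?",
    created_at="Sat Jan 27 04:17:38 +0000 2007",
    retweet_count=3,
)
assert status.favorited is None   # never passed, so it stays None
print(status.id, status.text)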
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/Smokescreen_IllusionBLACK/Integrations/Smokescreen_IllusionBLACK/Smokescreen_IllusionBLACK.py
python
Client.is_subdomain_decoy
(self, subdomain)
Checks if the subdomain is an IllusionBLACK TI decoy Args: subdomain: The subdomain to check. For example: experience.illusionblack.com Returns: True if subdomain is a decoy else False
Checks if the subdomain is an IllusionBLACK TI decoy Args: subdomain: The subdomain to check. For example: experience.illusionblack.com Returns: True if subdomain is a decoy else False
[ "Checks", "if", "the", "subdomain", "is", "an", "IllusionBLACK", "TI", "decoy", "Args", ":", "subdomain", ":", "The", "subdomain", "to", "check", ".", "For", "example", ":", "experience", ".", "illusionblack", ".", "com", "Returns", ":", "True", "if", "subdomain", "is", "a", "decoy", "else", "False" ]
def is_subdomain_decoy(self, subdomain): """ Checks if the subdomain is an IllusionBLACK TI decoy Args: subdomain: The subdomain to check. For example: experience.illusionblack.com Returns: True if subdomain is a decoy else False """ response = self._http_request( method="GET", url_suffix="/decoy/recon", ok_codes=(200,) ) ti_decoys: List = response["items"] for ti_decoy in ti_decoys: if subdomain == ti_decoy["name"]: return "True", {"IllusionBlack.IsSubdomainDecoy": {"Subdomain": subdomain, "Value": True}} return "False", {"IllusionBlack.IsSubdomainDecoy": {"Subdomain": subdomain, "Value": False}}
[ "def", "is_subdomain_decoy", "(", "self", ",", "subdomain", ")", ":", "response", "=", "self", ".", "_http_request", "(", "method", "=", "\"GET\"", ",", "url_suffix", "=", "\"/decoy/recon\"", ",", "ok_codes", "=", "(", "200", ",", ")", ")", "ti_decoys", ":", "List", "=", "response", "[", "\"items\"", "]", "for", "ti_decoy", "in", "ti_decoys", ":", "if", "subdomain", "==", "ti_decoy", "[", "\"name\"", "]", ":", "return", "\"True\"", ",", "{", "\"IllusionBlack.IsSubdomainDecoy\"", ":", "{", "\"Subdomain\"", ":", "subdomain", ",", "\"Value\"", ":", "True", "}", "}", "return", "\"False\"", ",", "{", "\"IllusionBlack.IsSubdomainDecoy\"", ":", "{", "\"Subdomain\"", ":", "subdomain", ",", "\"Value\"", ":", "False", "}", "}" ]
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/Smokescreen_IllusionBLACK/Integrations/Smokescreen_IllusionBLACK/Smokescreen_IllusionBLACK.py#L141-L157
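Note that, despite the docstring, the method actually returns a pair of a human-readable "True"/"False" string and a context dict, which is the shape the command runner expects. A usage sketch, assuming an already-configured Client instance:

# client is assumed to be a configured Smokescreen IllusionBLACK Client.
readable, context = client.is_subdomain_decoy("experience.illusionblack.com")
if context["IllusionBlack.IsSubdomainDecoy"]["Value"]:
    print("decoy hit:", readable)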
tslearn-team/tslearn
6c93071b385a89112b82799ae5870daeca1ab88b
tslearn/bases/bases.py
python
BaseModelPackage._to_dict
(self, output=None, hyper_parameters_only=False)
return d
Get model hyper-parameters and model-parameters as a dict that can be saved to disk. Returns ------- params : dict dict with relevant attributes sufficient to describe the model.
Get model hyper-parameters and model-parameters as a dict that can be saved to disk.
[ "Get", "model", "hyper", "-", "parameters", "and", "model", "-", "parameters", "as", "a", "dict", "that", "can", "be", "saved", "to", "disk", "." ]
def _to_dict(self, output=None, hyper_parameters_only=False): """ Get model hyper-parameters and model-parameters as a dict that can be saved to disk. Returns ------- params : dict dict with relevant attributes sufficient to describe the model. """ if not self._is_fitted(): raise NotFittedError("Model must be fit before it can be packaged") d = {'hyper_params': self.get_params(), 'model_params': self._get_model_params()} # This is just for json support to convert numpy arrays to lists if output == 'json': d['model_params'] = BaseModelPackage._listify(d['model_params']) d['hyper_params'] = BaseModelPackage._listify(d['hyper_params']) elif output == 'hdf5': d['hyper_params'] = \ BaseModelPackage._none_to_str(d['hyper_params']) if hyper_parameters_only: del d["model_params"] return d
[ "def", "_to_dict", "(", "self", ",", "output", "=", "None", ",", "hyper_parameters_only", "=", "False", ")", ":", "if", "not", "self", ".", "_is_fitted", "(", ")", ":", "raise", "NotFittedError", "(", "\"Model must be fit before it can be packaged\"", ")", "d", "=", "{", "'hyper_params'", ":", "self", ".", "get_params", "(", ")", ",", "'model_params'", ":", "self", ".", "_get_model_params", "(", ")", "}", "# This is just for json support to convert numpy arrays to lists", "if", "output", "==", "'json'", ":", "d", "[", "'model_params'", "]", "=", "BaseModelPackage", ".", "_listify", "(", "d", "[", "'model_params'", "]", ")", "d", "[", "'hyper_params'", "]", "=", "BaseModelPackage", ".", "_listify", "(", "d", "[", "'hyper_params'", "]", ")", "elif", "output", "==", "'hdf5'", ":", "d", "[", "'hyper_params'", "]", "=", "BaseModelPackage", ".", "_none_to_str", "(", "d", "[", "'hyper_params'", "]", ")", "if", "hyper_parameters_only", ":", "del", "d", "[", "\"model_params\"", "]", "return", "d" ]
https://github.com/tslearn-team/tslearn/blob/6c93071b385a89112b82799ae5870daeca1ab88b/tslearn/bases/bases.py#L63-L92
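The split between 'hyper_params' (scikit-learn's get_params) and 'model_params' (fitted attributes) is what makes the dict round-trippable. A sketch of inspecting it on a fitted estimator; TimeSeriesKMeans is assumed here as a representative BaseModelPackage subclass:

from tslearn.clustering import TimeSeriesKMeans
from tslearn.generators import random_walks

X = random_walks(n_ts=20, sz=16)
model = TimeSeriesKMeans(n_clusters=2).fit(X)
d = model._to_dict(output="json")   # numpy arrays converted to plain lists
print(sorted(d))                    # ['hyper_params', 'model_params']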
deepfakes/faceswap
09c7d8aca3c608d1afad941ea78e9fd9b64d9219
tools/manual/faceviewer/viewport.py
python
TKFace.photo
(self)
return self._photo
:class:`tkinter.PhotoImage`: The face in a format that can be placed on the :class:`~tools.manual.faceviewer.frame.FacesViewer` canvas.
:class:`tkinter.PhotoImage`: The face in a format that can be placed on the :class:`~tools.manual.faceviewer.frame.FacesViewer` canvas.
[ ":", "class", ":", "tkinter", ".", "PhotoImage", ":", "The", "face", "in", "a", "format", "that", "can", "be", "placed", "on", "the", ":", "class", ":", "~tools", ".", "manual", ".", "faceviewer", ".", "frame", ".", "FacesViewer", "canvas", "." ]
def photo(self): """ :class:`tkinter.PhotoImage`: The face in a format that can be placed on the :class:`~tools.manual.faceviewer.frame.FacesViewer` canvas. """ return self._photo
[ "def", "photo", "(", "self", ")", ":", "return", "self", ".", "_photo" ]
https://github.com/deepfakes/faceswap/blob/09c7d8aca3c608d1afad941ea78e9fd9b64d9219/tools/manual/faceviewer/viewport.py#L1033-L1036
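A one-line sketch of the intended use, assuming an existing TKFace instance and a tkinter Canvas:

# Place the cached PhotoImage on the faces-viewer canvas.
image_id = canvas.create_image(32, 32, image=tk_face.photo, anchor="nw")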
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/lib/grizzled/grizzled/db/base.py
python
DB.rollback
(self)
Roll the current transaction back. :raise Warning: Non-fatal warning :raise Error: Error
Roll the current transaction back.
[ "Roll", "the", "current", "transaction", "back", "." ]
def rollback(self): """ Roll the current transaction back. :raise Warning: Non-fatal warning :raise Error: Error """ dbi = self.__driver.get_import() try: self.__db.rollback() except dbi.Warning, val: raise Warning(val) except dbi.Error, val: raise Error(val)
[ "def", "rollback", "(", "self", ")", ":", "dbi", "=", "self", ".", "__driver", ".", "get_import", "(", ")", "try", ":", "self", ".", "__db", ".", "rollback", "(", ")", "except", "dbi", ".", "Warning", ",", "val", ":", "raise", "Warning", "(", "val", ")", "except", "dbi", ".", "Error", ",", "val", ":", "raise", "Error", "(", "val", ")" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/grizzled/grizzled/db/base.py#L631-L644
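Because driver-specific exceptions are re-raised as the module's own Warning/Error, callers can catch one exception family regardless of backend. A hedged sketch, assuming db is a connected grizzled DB wrapper:

from grizzled.db.base import Error   # assumed import path for the wrapped Error

try:
    cursor = db.cursor()
    cursor.execute("UPDATE accounts SET balance = balance - 10")
    db.commit()
except Error:
    db.rollback()   # undo the partial transaction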
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
lib/paho/mqtt/client.py
python
Client.on_connect_fail
(self, func)
Define the connection failure callback implementation Expected signature is: on_connect_fail(client, userdata) client: the client instance for this callback userdata: the private user data as set in Client() or userdata_set() Decorator: @client.connect_fail_callback() (```client``` is the name of the instance which this callback is being attached to)
Define the connection failure callback implementation
[ "Define", "the", "connection", "failure", "callback", "implementation" ]
def on_connect_fail(self, func): """ Define the connection failure callback implementation Expected signature is: on_connect_fail(client, userdata) client: the client instance for this callback userdata: the private user data as set in Client() or userdata_set() Decorator: @client.connect_fail_callback() (```client``` is the name of the instance which this callback is being attached to) """ with self._callback_mutex: self._on_connect_fail = func
[ "def", "on_connect_fail", "(", "self", ",", "func", ")", ":", "with", "self", ".", "_callback_mutex", ":", "self", ".", "_on_connect_fail", "=", "func" ]
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/paho/mqtt/client.py#L1910-L1924
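The property setter form works the same as the decorator; a minimal sketch with the standard paho-mqtt client (broker host is a placeholder):

import paho.mqtt.client as mqtt

def handle_connect_fail(client, userdata):
    print("connection attempt failed; will retry")

client = mqtt.Client()
client.on_connect_fail = handle_connect_fail   # equivalent to the decorator
client.connect_async("broker.example.com")     # placeholder broker host
client.loop_start()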
vaab/colour
11f138eb7841d2045160b378a2eec0c2321144c0
colour.py
python
hsl2rgb
(hsl)
return r, g, b
Convert HSL representation towards RGB :param h: Hue, position around the chromatic circle (h=1 equiv h=0) :param s: Saturation, color saturation (0=full gray, 1=full color) :param l: Lightness, overall lightness (0=full black, 1=full white) :rtype: 3-tuple for RGB values in float between 0 and 1 Hue, Saturation and Lightness are each floats between 0 and 1 Note that Hue can be set to any value but as it is a rotation around the chromatic circle, any value above 1 or below 0 can be expressed by a value between 0 and 1 (Note that h=0 is equiv to h=1). This algorithm came from: http://www.easyrgb.com/index.php?X=MATH&H=19#text19 Here are some quick notions of HSL to RGB conversion: >>> from colour import hsl2rgb With a lightness put at 0, RGB is always black >>> hsl2rgb((0.0, 0.0, 0.0)) (0.0, 0.0, 0.0) >>> hsl2rgb((0.5, 0.0, 0.0)) (0.0, 0.0, 0.0) >>> hsl2rgb((0.5, 0.5, 0.0)) (0.0, 0.0, 0.0) Same for lightness put at 1, RGB is always white >>> hsl2rgb((0.0, 0.0, 1.0)) (1.0, 1.0, 1.0) >>> hsl2rgb((0.5, 0.0, 1.0)) (1.0, 1.0, 1.0) >>> hsl2rgb((0.5, 0.5, 1.0)) (1.0, 1.0, 1.0) With saturation put at 0, the RGB should be equal to Lightness: >>> hsl2rgb((0.0, 0.0, 0.25)) (0.25, 0.25, 0.25) >>> hsl2rgb((0.5, 0.0, 0.5)) (0.5, 0.5, 0.5) >>> hsl2rgb((0.5, 0.0, 0.75)) (0.75, 0.75, 0.75) With saturation put at 1, and lightness put to 0.5, we can find normal full red, green, blue colors: >>> hsl2rgb((0 , 1.0, 0.5)) (1.0, 0.0, 0.0) >>> hsl2rgb((1 , 1.0, 0.5)) (1.0, 0.0, 0.0) >>> hsl2rgb((1.0/3 , 1.0, 0.5)) (0.0, 1.0, 0.0) >>> hsl2rgb((2.0/3 , 1.0, 0.5)) (0.0, 0.0, 1.0) Of course: >>> hsl2rgb((0.0, 2.0, 0.5)) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Saturation must be between 0 and 1. And: >>> hsl2rgb((0.0, 0.0, 1.5)) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Lightness must be between 0 and 1.
Convert HSL representation towards RGB
[ "Convert", "HSL", "representation", "towards", "RGB" ]
def hsl2rgb(hsl): """Convert HSL representation towards RGB :param h: Hue, position around the chromatic circle (h=1 equiv h=0) :param s: Saturation, color saturation (0=full gray, 1=full color) :param l: Ligthness, Overhaul lightness (0=full black, 1=full white) :rtype: 3-uple for RGB values in float between 0 and 1 Hue, Saturation, Range from Lightness is a float between 0 and 1 Note that Hue can be set to any value but as it is a rotation around the chromatic circle, any value above 1 or below 0 can be expressed by a value between 0 and 1 (Note that h=0 is equiv to h=1). This algorithm came from: http://www.easyrgb.com/index.php?X=MATH&H=19#text19 Here are some quick notion of HSL to RGB conversion: >>> from colour import hsl2rgb With a lightness put at 0, RGB is always rgbblack >>> hsl2rgb((0.0, 0.0, 0.0)) (0.0, 0.0, 0.0) >>> hsl2rgb((0.5, 0.0, 0.0)) (0.0, 0.0, 0.0) >>> hsl2rgb((0.5, 0.5, 0.0)) (0.0, 0.0, 0.0) Same for lightness put at 1, RGB is always rgbwhite >>> hsl2rgb((0.0, 0.0, 1.0)) (1.0, 1.0, 1.0) >>> hsl2rgb((0.5, 0.0, 1.0)) (1.0, 1.0, 1.0) >>> hsl2rgb((0.5, 0.5, 1.0)) (1.0, 1.0, 1.0) With saturation put at 0, the RGB should be equal to Lightness: >>> hsl2rgb((0.0, 0.0, 0.25)) (0.25, 0.25, 0.25) >>> hsl2rgb((0.5, 0.0, 0.5)) (0.5, 0.5, 0.5) >>> hsl2rgb((0.5, 0.0, 0.75)) (0.75, 0.75, 0.75) With saturation put at 1, and lightness put to 0.5, we can find normal full red, green, blue colors: >>> hsl2rgb((0 , 1.0, 0.5)) (1.0, 0.0, 0.0) >>> hsl2rgb((1 , 1.0, 0.5)) (1.0, 0.0, 0.0) >>> hsl2rgb((1.0/3 , 1.0, 0.5)) (0.0, 1.0, 0.0) >>> hsl2rgb((2.0/3 , 1.0, 0.5)) (0.0, 0.0, 1.0) Of course: >>> hsl2rgb((0.0, 2.0, 0.5)) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Saturation must be between 0 and 1. And: >>> hsl2rgb((0.0, 0.0, 1.5)) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Lightness must be between 0 and 1. """ h, s, l = [float(v) for v in hsl] if not (0.0 - FLOAT_ERROR <= s <= 1.0 + FLOAT_ERROR): raise ValueError("Saturation must be between 0 and 1.") if not (0.0 - FLOAT_ERROR <= l <= 1.0 + FLOAT_ERROR): raise ValueError("Lightness must be between 0 and 1.") if s == 0: return l, l, l if l < 0.5: v2 = l * (1.0 + s) else: v2 = (l + s) - (s * l) v1 = 2.0 * l - v2 r = _hue2rgb(v1, v2, h + (1.0 / 3)) g = _hue2rgb(v1, v2, h) b = _hue2rgb(v1, v2, h - (1.0 / 3)) return r, g, b
[ "def", "hsl2rgb", "(", "hsl", ")", ":", "h", ",", "s", ",", "l", "=", "[", "float", "(", "v", ")", "for", "v", "in", "hsl", "]", "if", "not", "(", "0.0", "-", "FLOAT_ERROR", "<=", "s", "<=", "1.0", "+", "FLOAT_ERROR", ")", ":", "raise", "ValueError", "(", "\"Saturation must be between 0 and 1.\"", ")", "if", "not", "(", "0.0", "-", "FLOAT_ERROR", "<=", "l", "<=", "1.0", "+", "FLOAT_ERROR", ")", ":", "raise", "ValueError", "(", "\"Lightness must be between 0 and 1.\"", ")", "if", "s", "==", "0", ":", "return", "l", ",", "l", ",", "l", "if", "l", "<", "0.5", ":", "v2", "=", "l", "*", "(", "1.0", "+", "s", ")", "else", ":", "v2", "=", "(", "l", "+", "s", ")", "-", "(", "s", "*", "l", ")", "v1", "=", "2.0", "*", "l", "-", "v2", "r", "=", "_hue2rgb", "(", "v1", ",", "v2", ",", "h", "+", "(", "1.0", "/", "3", ")", ")", "g", "=", "_hue2rgb", "(", "v1", ",", "v2", ",", "h", ")", "b", "=", "_hue2rgb", "(", "v1", ",", "v2", ",", "h", "-", "(", "1.0", "/", "3", ")", ")", "return", "r", ",", "g", ",", "b" ]
https://github.com/vaab/colour/blob/11f138eb7841d2045160b378a2eec0c2321144c0/colour.py#L272-L367
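A worked example tying the pieces together: convert an HSL triple, then scale the float channels to 8-bit values:

r, g, b = hsl2rgb((2.0 / 3, 1.0, 0.5))                 # pure blue per the doctests
print(tuple(int(round(255 * v)) for v in (r, g, b)))   # (0, 0, 255)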
mattupstate/flask-principal
0b5cd06b464ae8b60939857784bda86c03dda1eb
flask_principal.py
python
Principal.identity_loader
(self, f)
return f
Decorator to define a function as an identity loader. An identity loader function is called before each request to find any provided identities; the first identity found is the one loaded. For example:: app = Flask(__name__) principals = Principal(app) @principals.identity_loader def load_identity_from_weird_usecase(): return Identity('ali')
Decorator to define a function as an identity loader.
[ "Decorator", "to", "define", "a", "function", "as", "an", "identity", "loader", "." ]
def identity_loader(self, f): """Decorator to define a function as an identity loader. An identity loader function is called before request to find any provided identities. The first found identity is used to load from. For example:: app = Flask(__name__) principals = Principal(app) @principals.identity_loader def load_identity_from_weird_usecase(): return Identity('ali') """ self.identity_loaders.appendleft(f) return f
[ "def", "identity_loader", "(", "self", ",", "f", ")", ":", "self", ".", "identity_loaders", ".", "appendleft", "(", "f", ")", "return", "f" ]
https://github.com/mattupstate/flask-principal/blob/0b5cd06b464ae8b60939857784bda86c03dda1eb/flask_principal.py#L422-L439
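A slightly more realistic loader than the docstring's, pulling the identity from the Flask session; principals is assumed to be the Principal instance from the example above:

from flask import session
from flask_principal import Identity

@principals.identity_loader
def load_identity_from_session():
    # Returning None lets the next registered loader try.
    if "user_id" in session:
        return Identity(session["user_id"])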
charlesq34/pointnet2
42926632a3c33461aebfbee2d829098b30a23aaa
utils/tf_util.py
python
conv2d
(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', data_format='NHWC', use_xavier=True, stddev=1e-3, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None)
2D convolution with non-linear operation. Args: inputs: 4-D tensor variable BxHxWxC num_output_channels: int kernel_size: a list of 2 ints scope: string stride: a list of 2 ints padding: 'SAME' or 'VALID' data_format: 'NHWC' or 'NCHW' use_xavier: bool, use xavier_initializer if true stddev: float, stddev for truncated_normal init weight_decay: float activation_fn: function bn: bool, whether to use batch norm bn_decay: float or float tensor variable in [0,1] is_training: bool Tensor variable Returns: Variable tensor
2D convolution with non-linear operation.
[ "2D", "convolution", "with", "non", "-", "linear", "operation", "." ]
def conv2d(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', data_format='NHWC', use_xavier=True, stddev=1e-3, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None): """ 2D convolution with non-linear operation. Args: inputs: 4-D tensor variable BxHxWxC num_output_channels: int kernel_size: a list of 2 ints scope: string stride: a list of 2 ints padding: 'SAME' or 'VALID' data_format: 'NHWC' or 'NCHW' use_xavier: bool, use xavier_initializer if true stddev: float, stddev for truncated_normal init weight_decay: float activation_fn: function bn: bool, whether to use batch norm bn_decay: float or float tensor variable in [0,1] is_training: bool Tensor variable Returns: Variable tensor """ with tf.variable_scope(scope) as sc: kernel_h, kernel_w = kernel_size assert(data_format=='NHWC' or data_format=='NCHW') if data_format == 'NHWC': num_in_channels = inputs.get_shape()[-1].value elif data_format=='NCHW': num_in_channels = inputs.get_shape()[1].value kernel_shape = [kernel_h, kernel_w, num_in_channels, num_output_channels] kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay) stride_h, stride_w = stride outputs = tf.nn.conv2d(inputs, kernel, [1, stride_h, stride_w, 1], padding=padding, data_format=data_format) biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0)) outputs = tf.nn.bias_add(outputs, biases, data_format=data_format) if bn: outputs = batch_norm_for_conv2d(outputs, is_training, bn_decay=bn_decay, scope='bn', data_format=data_format) if activation_fn is not None: outputs = activation_fn(outputs) return outputs
[ "def", "conv2d", "(", "inputs", ",", "num_output_channels", ",", "kernel_size", ",", "scope", ",", "stride", "=", "[", "1", ",", "1", "]", ",", "padding", "=", "'SAME'", ",", "data_format", "=", "'NHWC'", ",", "use_xavier", "=", "True", ",", "stddev", "=", "1e-3", ",", "weight_decay", "=", "None", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ",", "bn", "=", "False", ",", "bn_decay", "=", "None", ",", "is_training", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "scope", ")", "as", "sc", ":", "kernel_h", ",", "kernel_w", "=", "kernel_size", "assert", "(", "data_format", "==", "'NHWC'", "or", "data_format", "==", "'NCHW'", ")", "if", "data_format", "==", "'NHWC'", ":", "num_in_channels", "=", "inputs", ".", "get_shape", "(", ")", "[", "-", "1", "]", ".", "value", "elif", "data_format", "==", "'NCHW'", ":", "num_in_channels", "=", "inputs", ".", "get_shape", "(", ")", "[", "1", "]", ".", "value", "kernel_shape", "=", "[", "kernel_h", ",", "kernel_w", ",", "num_in_channels", ",", "num_output_channels", "]", "kernel", "=", "_variable_with_weight_decay", "(", "'weights'", ",", "shape", "=", "kernel_shape", ",", "use_xavier", "=", "use_xavier", ",", "stddev", "=", "stddev", ",", "wd", "=", "weight_decay", ")", "stride_h", ",", "stride_w", "=", "stride", "outputs", "=", "tf", ".", "nn", ".", "conv2d", "(", "inputs", ",", "kernel", ",", "[", "1", ",", "stride_h", ",", "stride_w", ",", "1", "]", ",", "padding", "=", "padding", ",", "data_format", "=", "data_format", ")", "biases", "=", "_variable_on_cpu", "(", "'biases'", ",", "[", "num_output_channels", "]", ",", "tf", ".", "constant_initializer", "(", "0.0", ")", ")", "outputs", "=", "tf", ".", "nn", ".", "bias_add", "(", "outputs", ",", "biases", ",", "data_format", "=", "data_format", ")", "if", "bn", ":", "outputs", "=", "batch_norm_for_conv2d", "(", "outputs", ",", "is_training", ",", "bn_decay", "=", "bn_decay", ",", "scope", "=", "'bn'", ",", "data_format", "=", "data_format", ")", "if", "activation_fn", "is", "not", "None", ":", "outputs", "=", "activation_fn", "(", "outputs", ")", "return", "outputs" ]
https://github.com/charlesq34/pointnet2/blob/42926632a3c33461aebfbee2d829098b30a23aaa/utils/tf_util.py#L120-L185
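A graph-mode TF 1.x sketch of calling the helper; shapes and the scope name are illustrative:

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [32, 224, 224, 3])   # BxHxWxC
is_training = tf.placeholder(tf.bool, [])
net = conv2d(inputs, 64, [3, 3], scope="conv1",
             stride=[1, 1], padding="SAME",
             bn=True, bn_decay=0.9, is_training=is_training)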
clovaai/assembled-cnn
9cdc29761d828ecd3708ea13e5acc44c696ec30f
datasets/build_ethz_food101.py
python
_process_image_files_batch
(thread_index, offsets, output_filenames, filenames, labels)
Reads a list of images within a single worker thread and converts them to TFRecord format. :param thread_index: index of the thread currently doing the work. :param offsets: offset list; the offsets this thread must process out of the image list, given as a list with one entry per shard :param output_filenames: output file names, given as a list with one entry per shard. :param filenames: full list of image files to process :param labels: full list of image labels to process
Reads a list of images within a single worker thread and converts them to TFRecord format. :param thread_index: index of the thread currently doing the work. :param offsets: offset list; the offsets this thread must process out of the image list, given as a list with one entry per shard :param output_filenames: output file names, given as a list with one entry per shard. :param filenames: full list of image files to process :param labels: full list of image labels to process
[ "Reads", "a", "list", "of", "images", "within", "a", "single", "worker", "thread", "and", "converts", "them", "to", "TFRecord", "format", ".", ":", "param", "thread_index", ":", "index", "of", "the", "thread", "currently", "doing", "the", "work", ".", ":", "param", "offsets", ":", "offset", "list", ";", "the", "offsets", "this", "thread", "must", "process", "out", "of", "the", "image", "list", ",", "given", "as", "a", "list", "with", "one", "entry", "per", "shard", ":", "param", "output_filenames", ":", "output", "file", "names", ",", "given", "as", "a", "list", "with", "one", "entry", "per", "shard", ".", ":", "param", "filenames", ":", "full", "list", "of", "image", "files", "to", "process", ":", "param", "labels", ":", "full", "list", "of", "image", "labels", "to", "process" ]
def _process_image_files_batch(thread_index, offsets, output_filenames, filenames, labels): """ 하나의 스레드 단위에서 이미지 리스트를 읽어 TRRecord 타입으로 변환하는 함수 :param thread_index: 현재 작업중인 thread 번호. :param offsets: offset list. 이미지 목록 중 현재 스레드에서 처리해야 할 offset 값으로 shard 갯수만큼 리스트로 제공 :param output_filenames: 출력 파일 이름으로 shard 갯수만큼 리스트로 제공. :param filenames: 처리해야 할 전체 이미지 파일 리스트 :param labels: 처리해야 할 전체 이미지 레이블 리스트 """ assert len(offsets) == len(output_filenames) assert len(filenames) == len(labels) num_files_in_thread = offsets[-1][1] - offsets[0][0] counter = 0 # 하나의 thread 에는 여러 개의 shard 가 할당될 수 있다. for offset, output_filename in zip(offsets, output_filenames): output_file = os.path.join(FLAGS.output_dir, output_filename) writer = tf.python_io.TFRecordWriter(output_file) # offset 에는 현재 shard 에 대한 (start, end) offset이 저장되어 있음. files_in_shard = np.arange(offset[0], offset[1], dtype=int) shard_counter = 0 for i in files_in_shard: filename = filenames[i] label = labels[i] try: image_data, height, width = _process_image(filename) except ValueError: dataset_utils.log('[thread %2d]: Invalid image found. %s - [skip].' % (thread_index, filename)) continue example = data_util.convert_to_example_without_bbox(image_data, 'jpg', label, height, width) writer.write(example.SerializeToString()) counter += 1 shard_counter += 1 if not counter % 1000: dataset_utils.log('%s [thread %2d]: Processed %d of %d images in thread batch.' % (datetime.now(), thread_index, counter, num_files_in_thread)) writer.close() dataset_utils.log('%s [thread %2d]: Wrote %d images to %s' % (datetime.now(), thread_index, shard_counter, output_file))
[ "def", "_process_image_files_batch", "(", "thread_index", ",", "offsets", ",", "output_filenames", ",", "filenames", ",", "labels", ")", ":", "assert", "len", "(", "offsets", ")", "==", "len", "(", "output_filenames", ")", "assert", "len", "(", "filenames", ")", "==", "len", "(", "labels", ")", "num_files_in_thread", "=", "offsets", "[", "-", "1", "]", "[", "1", "]", "-", "offsets", "[", "0", "]", "[", "0", "]", "counter", "=", "0", "# 하나의 thread 에는 여러 개의 shard 가 할당될 수 있다.", "for", "offset", ",", "output_filename", "in", "zip", "(", "offsets", ",", "output_filenames", ")", ":", "output_file", "=", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "output_dir", ",", "output_filename", ")", "writer", "=", "tf", ".", "python_io", ".", "TFRecordWriter", "(", "output_file", ")", "# offset 에는 현재 shard 에 대한 (start, end) offset이 저장되어 있음.", "files_in_shard", "=", "np", ".", "arange", "(", "offset", "[", "0", "]", ",", "offset", "[", "1", "]", ",", "dtype", "=", "int", ")", "shard_counter", "=", "0", "for", "i", "in", "files_in_shard", ":", "filename", "=", "filenames", "[", "i", "]", "label", "=", "labels", "[", "i", "]", "try", ":", "image_data", ",", "height", ",", "width", "=", "_process_image", "(", "filename", ")", "except", "ValueError", ":", "dataset_utils", ".", "log", "(", "'[thread %2d]: Invalid image found. %s - [skip].'", "%", "(", "thread_index", ",", "filename", ")", ")", "continue", "example", "=", "data_util", ".", "convert_to_example_without_bbox", "(", "image_data", ",", "'jpg'", ",", "label", ",", "height", ",", "width", ")", "writer", ".", "write", "(", "example", ".", "SerializeToString", "(", ")", ")", "counter", "+=", "1", "shard_counter", "+=", "1", "if", "not", "counter", "%", "1000", ":", "dataset_utils", ".", "log", "(", "'%s [thread %2d]: Processed %d of %d images in thread batch.'", "%", "(", "datetime", ".", "now", "(", ")", ",", "thread_index", ",", "counter", ",", "num_files_in_thread", ")", ")", "writer", ".", "close", "(", ")", "dataset_utils", ".", "log", "(", "'%s [thread %2d]: Wrote %d images to %s'", "%", "(", "datetime", ".", "now", "(", ")", ",", "thread_index", ",", "shard_counter", ",", "output_file", ")", ")" ]
https://github.com/clovaai/assembled-cnn/blob/9cdc29761d828ecd3708ea13e5acc44c696ec30f/datasets/build_ethz_food101.py#L56-L99
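A sketch of how the offset bookkeeping lines up for one worker: a thread assigned two 500-image shards would be invoked roughly like this (shard names, filenames and labels are illustrative, and FLAGS.output_dir must already be set):

offsets = [(0, 500), (500, 1000)]              # one (start, end) pair per shard
output_filenames = ["train-00000-of-00008",
                    "train-00001-of-00008"]
_process_image_files_batch(0, offsets, output_filenames, filenames, labels)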
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/imaplib.py
python
IMAP4.subscribe
(self, mailbox)
return self._simple_command('SUBSCRIBE', mailbox)
Subscribe to new mailbox. (typ, [data]) = <instance>.subscribe(mailbox)
Subscribe to new mailbox.
[ "Subscribe", "to", "new", "mailbox", "." ]
def subscribe(self, mailbox): """Subscribe to new mailbox. (typ, [data]) = <instance>.subscribe(mailbox) """ return self._simple_command('SUBSCRIBE', mailbox)
[ "def", "subscribe", "(", "self", ",", "mailbox", ")", ":", "return", "self", ".", "_simple_command", "(", "'SUBSCRIBE'", ",", "mailbox", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/imaplib.py#L774-L779
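Standard-library usage, with placeholder host and credentials:

import imaplib

M = imaplib.IMAP4_SSL("imap.example.com")       # placeholder host
M.login("user", "password")                     # placeholder credentials
typ, data = M.subscribe('"INBOX.Newsletters"')  # quote names containing spaces
print(typ)                                      # 'OK' on success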
ucsb-seclab/karonte
427ac313e596f723e40768b95d13bd7a9fc92fd8
eval/multi_bin/network-facing/run_utils.py
python
get_reg_used
(p, cfg, addr, idx, s_addr)
return None
Finds whether and which register is used to store a string address. :param p: angr project for the binary under analysis :param cfg: control flow graph of the binary :param addr: basic block address :param idx: statement idx of the statement referencing a string :param s_addr: address of the string referenced in the statement pointed to by idx :return: the register name the string is assigned to
Finds whether and which register is used to store a string address.
[ "Finds", "whether", "and", "which", "register", "is", "used", "to", "store", "a", "string", "address", "." ]
def get_reg_used(p, cfg, addr, idx, s_addr): """ Finds whether and which register is used to store a string address. :param addr: basic block address :param idx: statement idx of the statement referencing a string :param s: string referenced in the statement pointed by idx :return: the register name the string is assigned to """ if not are_parameters_in_registers(p): raise Exception("Parameters are not in registers") block = p.factory.block(addr) stmt = block.vex.statements[idx] no = cfg.get_any_node(addr) # sometimes strings are reference indirectly through an address contained in the # text section endianess = '<I' if 'LE' in p.arch.memory_endness else '>I' s_addr_2 = None try: s_addr_2 = struct.unpack(endianess, ''.join(p.loader.memory.read_bytes(s_addr, p.arch.bytes)))[0] except: pass if hasattr(stmt, 'offset'): return p.arch.register_names[stmt.offset] # damn! The string is not assigned directly to a register, but to a tmp. # It means we have to find out what register is used to pass the string # to the function call # save the function manager, CFGAccurate will change it fm = p.kb.functions cfga = p.analyses.CFGAccurate(starts=(no.function_address,), keep_state=True, call_depth=0) no = cfga.get_any_node(addr) if not no: cfga = p.analyses.CFGAccurate(starts=(addr,), keep_state=True, call_depth=0) no = cfga.get_any_node(addr) if not no: return None args = get_args_call(p, no) # restore the old function manager p.kb.functions = fm for _, vals in args.iteritems(): for o, v in vals: if v in (s_addr, s_addr_2): return p.arch.register_names[o] return None
[ "def", "get_reg_used", "(", "p", ",", "cfg", ",", "addr", ",", "idx", ",", "s_addr", ")", ":", "if", "not", "are_parameters_in_registers", "(", "p", ")", ":", "raise", "Exception", "(", "\"Parameters are not in registers\"", ")", "block", "=", "p", ".", "factory", ".", "block", "(", "addr", ")", "stmt", "=", "block", ".", "vex", ".", "statements", "[", "idx", "]", "no", "=", "cfg", ".", "get_any_node", "(", "addr", ")", "# sometimes strings are reference indirectly through an address contained in the", "# text section", "endianess", "=", "'<I'", "if", "'LE'", "in", "p", ".", "arch", ".", "memory_endness", "else", "'>I'", "s_addr_2", "=", "None", "try", ":", "s_addr_2", "=", "struct", ".", "unpack", "(", "endianess", ",", "''", ".", "join", "(", "p", ".", "loader", ".", "memory", ".", "read_bytes", "(", "s_addr", ",", "p", ".", "arch", ".", "bytes", ")", ")", ")", "[", "0", "]", "except", ":", "pass", "if", "hasattr", "(", "stmt", ",", "'offset'", ")", ":", "return", "p", ".", "arch", ".", "register_names", "[", "stmt", ".", "offset", "]", "# damn! The string is not assigned directly to a register, but to a tmp.", "# It means we have to find out what register is used to pass the string", "# to the function call", "# save the function manager, CFGAccurate will change it", "fm", "=", "p", ".", "kb", ".", "functions", "cfga", "=", "p", ".", "analyses", ".", "CFGAccurate", "(", "starts", "=", "(", "no", ".", "function_address", ",", ")", ",", "keep_state", "=", "True", ",", "call_depth", "=", "0", ")", "no", "=", "cfga", ".", "get_any_node", "(", "addr", ")", "if", "not", "no", ":", "cfga", "=", "p", ".", "analyses", ".", "CFGAccurate", "(", "starts", "=", "(", "addr", ",", ")", ",", "keep_state", "=", "True", ",", "call_depth", "=", "0", ")", "no", "=", "cfga", ".", "get_any_node", "(", "addr", ")", "if", "not", "no", ":", "return", "None", "args", "=", "get_args_call", "(", "p", ",", "no", ")", "# restore the old function manager", "p", ".", "kb", ".", "functions", "=", "fm", "for", "_", ",", "vals", "in", "args", ".", "iteritems", "(", ")", ":", "for", "o", ",", "v", "in", "vals", ":", "if", "v", "in", "(", "s_addr", ",", "s_addr_2", ")", ":", "return", "p", ".", "arch", ".", "register_names", "[", "o", "]", "return", "None" ]
https://github.com/ucsb-seclab/karonte/blob/427ac313e596f723e40768b95d13bd7a9fc92fd8/eval/multi_bin/network-facing/run_utils.py#L272-L324
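A heavily hedged call-site sketch, assuming an angr project and CFG; the binary path and the addr/idx/s_addr values are placeholders that would come from a prior scan for string references:

import angr

p = angr.Project("firmware_binary", auto_load_libs=False)   # placeholder path
cfg = p.analyses.CFG()
reg = get_reg_used(p, cfg, addr=0x401000, idx=12, s_addr=0x804A000)
if reg is not None:
    print("string passed via", reg)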
digidotcom/xbee-python
0757f4be0017530c205175fbee8f9f61be9614d1
digi/xbee/packets/socket.py
python
SocketCreatePacket.create_packet
(raw, operating_mode)
return SocketCreatePacket(raw[4], IPProtocol.get(raw[5]), op_mode=operating_mode)
Override method. Returns: :class:`.SocketCreatePacket`. Raises: InvalidPacketException: if the bytearray length is less than 7. (start delim. + length (2 bytes) + frame type + frame id + protocol + checksum = 7 bytes). InvalidPacketException: if the length field of 'raw' is different from its real length. (length field: bytes 2 and 3) InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`. InvalidPacketException: if the calculated checksum is different from the checksum field value (last byte). InvalidPacketException: if the frame type is not :attr:`.ApiFrameType.SOCKET_CREATE`. InvalidOperatingModeException: if `operating_mode` is not supported. .. seealso:: | :meth:`.XBeePacket.create_packet` | :meth:`.XBeeAPIPacket._check_api_packet`
Override method.
[ "Override", "method", "." ]
def create_packet(raw, operating_mode): """ Override method. Returns: :class:`.SocketCreatePacket`. Raises: InvalidPacketException: if the bytearray length is less than 7. (start delim. + length (2 bytes) + frame type + frame id + protocol + checksum = 7 bytes). InvalidPacketException: if the length field of 'raw' is different from its real length. (length field: bytes 2 and 3) InvalidPacketException: if the first byte of 'raw' is not the header byte. See :class:`.SpecialByte`. InvalidPacketException: if the calculated checksum is different from the checksum field value (last byte). InvalidPacketException: if the frame type is not :attr:`.ApiFrameType.SOCKET_CREATE`. InvalidOperatingModeException: if `operating_mode` is not supported. .. seealso:: | :meth:`.XBeePacket.create_packet` | :meth:`.XBeeAPIPacket._check_api_packet` """ if operating_mode not in (OperatingMode.ESCAPED_API_MODE, OperatingMode.API_MODE): raise InvalidOperatingModeException(op_mode=operating_mode) XBeeAPIPacket._check_api_packet( raw, min_length=SocketCreatePacket.__MIN_PACKET_LENGTH) if raw[3] != ApiFrameType.SOCKET_CREATE.code: raise InvalidPacketException( message="This packet is not a Socket Create packet.") return SocketCreatePacket(raw[4], IPProtocol.get(raw[5]), op_mode=operating_mode)
[ "def", "create_packet", "(", "raw", ",", "operating_mode", ")", ":", "if", "operating_mode", "not", "in", "(", "OperatingMode", ".", "ESCAPED_API_MODE", ",", "OperatingMode", ".", "API_MODE", ")", ":", "raise", "InvalidOperatingModeException", "(", "op_mode", "=", "operating_mode", ")", "XBeeAPIPacket", ".", "_check_api_packet", "(", "raw", ",", "min_length", "=", "SocketCreatePacket", ".", "__MIN_PACKET_LENGTH", ")", "if", "raw", "[", "3", "]", "!=", "ApiFrameType", ".", "SOCKET_CREATE", ".", "code", ":", "raise", "InvalidPacketException", "(", "message", "=", "\"This packet is not a Socket Create packet.\"", ")", "return", "SocketCreatePacket", "(", "raw", "[", "4", "]", ",", "IPProtocol", ".", "get", "(", "raw", "[", "5", "]", ")", ",", "op_mode", "=", "operating_mode", ")" ]
https://github.com/digidotcom/xbee-python/blob/0757f4be0017530c205175fbee8f9f61be9614d1/digi/xbee/packets/socket.py#L68-L104
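Round-tripping through output() avoids hand-computing the length and checksum bytes; a sketch using the package's own types:

from digi.xbee.models.protocol import IPProtocol
from digi.xbee.models.mode import OperatingMode

pkt = SocketCreatePacket(frame_id=0x01, protocol=IPProtocol.TCP)
raw = pkt.output()                                       # serialized API frame
parsed = SocketCreatePacket.create_packet(raw, OperatingMode.API_MODE)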
apache/tvm
6eb4ed813ebcdcd9558f0906a1870db8302ff1e0
python/tvm/topi/x86/tensor_intrin.py
python
dot_16x1x16_uint8_int8_int32_skylake
()
return te.decl_tensor_intrin( C.op, _intrin_func, binds={data: a_buffer, kernel: b_buffer}, default_buffer_params=buffer_params, )
Int8 dot product by every 4 elements using AVX512 Skylake instructions. This function takes two arrays of uint8 and int8 datatype -- data[4] and kernel[16][4] -- and computes a dot product of data[4] with every 4 elements of kernels, resulting in output[16] of int32 datatype. The pseudo code is as follows. .. code-block:: c void dot_16x1x16_uint8_int8_int32(uint8 data[4], int8 kernel[16][4], int32 output[16]){ for (int i = 0; i < 16; i++){ output[i] = 0; for (int k = 0; k < 4; k++){ output[i] += data[k] * kernel[i][k] } } } Physically, the kernel array sits in an AVX512 vector register and the data[4] is broadcasted to another AVX512 vector register. This function returns a TensorIntrin that can be used to tensorize a schedule. Returns ------- intrin : TensorIntrin The Skylake int8 TensorIntrin that can be used in tensorizing schedule
Int8 dot product by every 4 elements using AVX512 Skylake instructions. This function takes two arrays of uint8 and int8 datatype -- data[4] and kernel[16][4] -- and computes a dot product of data[4] with every 4 elements of kernels, resulting in output[16] of int32 datatype. The pseudo code is as follows. .. code-block:: c void dot_16x1x16_uint8_int8_int32(uint8 data[4], int8 kernel[16][4], int32 output[16]){ for (int i = 0; i < 16; i++){ output[i] = 0; for (int k = 0; k < 4; k++){ output[i] += data[k] * kernel[i][k] } } }
[ "Int8", "dot", "product", "by", "every", "4", "elements", "using", "AVX512", "Skylake", "instructions", ".", "This", "function", "takes", "two", "arrays", "of", "uint8", "and", "int8", "datatype", "--", "data", "[", "4", "]", "and", "kernel", "[", "16", "]", "[", "4", "]", "--", "and", "computes", "a", "dot", "product", "of", "data", "[", "4", "]", "with", "every", "4", "elements", "of", "kernels", "resulting", "in", "output", "[", "16", "]", "of", "int32", "datatype", ".", "The", "pseudo", "code", "is", "as", "follows", ".", "..", "code", "-", "block", "::", "c", "void", "dot_16x1x16_uint8_int8_int32", "(", "uint8", "data", "[", "4", "]", "int8", "kernel", "[", "16", "]", "[", "4", "]", "int32", "output", "[", "16", "]", ")", "{", "for", "(", "int", "i", "=", "0", ";", "i", "<", "16", ";", "i", "++", ")", "{", "output", "[", "i", "]", "=", "0", ";", "for", "(", "int", "k", "=", "0", ";", "k", "<", "4", ";", "k", "++", ")", "{", "output", "[", "i", "]", "+", "=", "data", "[", "k", "]", "*", "kernel", "[", "i", "]", "[", "k", "]", "}", "}", "}" ]
def dot_16x1x16_uint8_int8_int32_skylake(): """ Int8 dot product by every 4 elements using AVX512 Skylake instructions. This function takes two arrays of uint8 and int8 datatype -- data[4] and kernel[16][4] -- and computes a dot product of data[4] with every 4 elements of kernels, resulting in output[16] of int32 datatype. The pseudo code is as follows. .. code-block:: c void dot_16x1x16_uint8_int8_int32(uint8 data[4], int8 kernel[16][4], int32 output[16]){ for (int i = 0; i < 16; i++){ output[i] = 0; for (int k = 0; k < 4; k++){ output[i] += data[k] * kernel[i][k] } } } Physically, the kernel array sits in an AVX512 vector register and the data[4] is broadcasted to another AVX512 vector register. This function returns a TensorIntrin that can be used to tensorize a schedule. Returns ------- intrin : TensorIntrin The Skylake int8 TensorIntrin that can be used in tensorizing schedule """ int32_lanes = get_simd_32bit_lanes() num_int8_elements = 4 # 4 int8 elements in int32 data = te.placeholder((num_int8_elements,), dtype="uint8", name="data") kernel = te.placeholder((int32_lanes, num_int8_elements), dtype="int8", name="kernel") k = te.reduce_axis((0, num_int8_elements), name="k") C = te.compute( (int32_lanes,), lambda i: te.sum(data[k].astype("int32") * kernel[i, k].astype("int32"), axis=k), name="C", ) a_buffer = tvm.tir.decl_buffer( data.shape, dtype="uint8", name="a_buffer", offset_factor=1, strides=[1] ) b_buffer = tvm.tir.decl_buffer( kernel.shape, dtype="int8", name="b_buffer", offset_factor=1, strides=[te.var("ldw"), 1] ) def _intrin_func(ins, outs): def _instr(index): # int_lx32 - output datatype after pmaddubs - 16 bits to number of lanes # int_8xl - input datatype to pmaddubs - 8 bits to number of lanes # int_32xl - output datatype after pmaddw - 32 bits per number of lanes if int32_lanes == 4: int_lx32 = "int16x8" int_8xl = "int8x16" int_32xl = "int32x4" pmaddubs = "llvm.x86.ssse3.pmadd.ub.sw.128" pmaddw = "llvm.x86.sse2.pmadd.wd" elif int32_lanes == 8: int_lx32 = "int16x16" int_8xl = "int8x32" int_32xl = "int32x8" pmaddubs = "llvm.x86.avx2.pmadd.ub.sw" pmaddw = "llvm.x86.avx2.pmadd.wd" elif int32_lanes == 16: int_lx32 = "int16x32" int_8xl = "int8x64" int_32xl = "int32x16" pmaddubs = "llvm.x86.avx512.pmaddubs.w.512" pmaddw = "llvm.x86.avx512.pmaddw.d.512" ib = tvm.tir.ir_builder.create() if index == 1: ib.emit(outs[0].vstore(0, tvm.tir.const(0, int_32xl))) return ib.get() a_int8 = ins[0].vload([0], "uint8x4") re_int32 = tvm.tir.call_intrin("int32", "tir.reinterpret", a_int8) vec_ai32 = re_int32.astype(int_32xl) vec_a = tvm.tir.call_intrin(int_8xl, "tir.reinterpret", vec_ai32) vec_b = ins[1].vload([0, 0], int_8xl) vec_one = tvm.tir.const(1, int_lx32) pair_reduction = tvm.tir.call_llvm_pure_intrin( int_lx32, pmaddubs, tvm.tir.const(0, "uint32"), vec_a, vec_b, ) quad_reduction = tvm.tir.call_llvm_pure_intrin( int_32xl, pmaddw, tvm.tir.const(0, "uint32"), pair_reduction, vec_one, ) if index == 0: ib.emit(outs[0].vstore(0, quad_reduction)) else: ib.emit(outs[0].vstore(0, quad_reduction + outs[0].vload([0], int_32xl))) return ib.get() # body, reset, update return _instr(0), _instr(1), _instr(2) buffer_params = {"offset_factor": 1} return te.decl_tensor_intrin( C.op, _intrin_func, binds={data: a_buffer, kernel: b_buffer}, default_buffer_params=buffer_params, )
[ "def", "dot_16x1x16_uint8_int8_int32_skylake", "(", ")", ":", "int32_lanes", "=", "get_simd_32bit_lanes", "(", ")", "num_int8_elements", "=", "4", "# 4 int8 elements in int32", "data", "=", "te", ".", "placeholder", "(", "(", "num_int8_elements", ",", ")", ",", "dtype", "=", "\"uint8\"", ",", "name", "=", "\"data\"", ")", "kernel", "=", "te", ".", "placeholder", "(", "(", "int32_lanes", ",", "num_int8_elements", ")", ",", "dtype", "=", "\"int8\"", ",", "name", "=", "\"kernel\"", ")", "k", "=", "te", ".", "reduce_axis", "(", "(", "0", ",", "num_int8_elements", ")", ",", "name", "=", "\"k\"", ")", "C", "=", "te", ".", "compute", "(", "(", "int32_lanes", ",", ")", ",", "lambda", "i", ":", "te", ".", "sum", "(", "data", "[", "k", "]", ".", "astype", "(", "\"int32\"", ")", "*", "kernel", "[", "i", ",", "k", "]", ".", "astype", "(", "\"int32\"", ")", ",", "axis", "=", "k", ")", ",", "name", "=", "\"C\"", ",", ")", "a_buffer", "=", "tvm", ".", "tir", ".", "decl_buffer", "(", "data", ".", "shape", ",", "dtype", "=", "\"uint8\"", ",", "name", "=", "\"a_buffer\"", ",", "offset_factor", "=", "1", ",", "strides", "=", "[", "1", "]", ")", "b_buffer", "=", "tvm", ".", "tir", ".", "decl_buffer", "(", "kernel", ".", "shape", ",", "dtype", "=", "\"int8\"", ",", "name", "=", "\"b_buffer\"", ",", "offset_factor", "=", "1", ",", "strides", "=", "[", "te", ".", "var", "(", "\"ldw\"", ")", ",", "1", "]", ")", "def", "_intrin_func", "(", "ins", ",", "outs", ")", ":", "def", "_instr", "(", "index", ")", ":", "# int_lx32 - output datatype after pmaddubs - 16 bits to number of lanes", "# int_8xl - input datatype to pmaddubs - 8 bits to number of lanes", "# int_32xl - output datatype after pmaddw - 32 bits per number of lanes", "if", "int32_lanes", "==", "4", ":", "int_lx32", "=", "\"int16x8\"", "int_8xl", "=", "\"int8x16\"", "int_32xl", "=", "\"int32x4\"", "pmaddubs", "=", "\"llvm.x86.ssse3.pmadd.ub.sw.128\"", "pmaddw", "=", "\"llvm.x86.sse2.pmadd.wd\"", "elif", "int32_lanes", "==", "8", ":", "int_lx32", "=", "\"int16x16\"", "int_8xl", "=", "\"int8x32\"", "int_32xl", "=", "\"int32x8\"", "pmaddubs", "=", "\"llvm.x86.avx2.pmadd.ub.sw\"", "pmaddw", "=", "\"llvm.x86.avx2.pmadd.wd\"", "elif", "int32_lanes", "==", "16", ":", "int_lx32", "=", "\"int16x32\"", "int_8xl", "=", "\"int8x64\"", "int_32xl", "=", "\"int32x16\"", "pmaddubs", "=", "\"llvm.x86.avx512.pmaddubs.w.512\"", "pmaddw", "=", "\"llvm.x86.avx512.pmaddw.d.512\"", "ib", "=", "tvm", ".", "tir", ".", "ir_builder", ".", "create", "(", ")", "if", "index", "==", "1", ":", "ib", ".", "emit", "(", "outs", "[", "0", "]", ".", "vstore", "(", "0", ",", "tvm", ".", "tir", ".", "const", "(", "0", ",", "int_32xl", ")", ")", ")", "return", "ib", ".", "get", "(", ")", "a_int8", "=", "ins", "[", "0", "]", ".", "vload", "(", "[", "0", "]", ",", "\"uint8x4\"", ")", "re_int32", "=", "tvm", ".", "tir", ".", "call_intrin", "(", "\"int32\"", ",", "\"tir.reinterpret\"", ",", "a_int8", ")", "vec_ai32", "=", "re_int32", ".", "astype", "(", "int_32xl", ")", "vec_a", "=", "tvm", ".", "tir", ".", "call_intrin", "(", "int_8xl", ",", "\"tir.reinterpret\"", ",", "vec_ai32", ")", "vec_b", "=", "ins", "[", "1", "]", ".", "vload", "(", "[", "0", ",", "0", "]", ",", "int_8xl", ")", "vec_one", "=", "tvm", ".", "tir", ".", "const", "(", "1", ",", "int_lx32", ")", "pair_reduction", "=", "tvm", ".", "tir", ".", "call_llvm_pure_intrin", "(", "int_lx32", ",", "pmaddubs", ",", "tvm", ".", "tir", ".", "const", "(", "0", ",", "\"uint32\"", ")", ",", "vec_a", ",", "vec_b", ",", ")", "quad_reduction", 
"=", "tvm", ".", "tir", ".", "call_llvm_pure_intrin", "(", "int_32xl", ",", "pmaddw", ",", "tvm", ".", "tir", ".", "const", "(", "0", ",", "\"uint32\"", ")", ",", "pair_reduction", ",", "vec_one", ",", ")", "if", "index", "==", "0", ":", "ib", ".", "emit", "(", "outs", "[", "0", "]", ".", "vstore", "(", "0", ",", "quad_reduction", ")", ")", "else", ":", "ib", ".", "emit", "(", "outs", "[", "0", "]", ".", "vstore", "(", "0", ",", "quad_reduction", "+", "outs", "[", "0", "]", ".", "vload", "(", "[", "0", "]", ",", "int_32xl", ")", ")", ")", "return", "ib", ".", "get", "(", ")", "# body, reset, update", "return", "_instr", "(", "0", ")", ",", "_instr", "(", "1", ")", ",", "_instr", "(", "2", ")", "buffer_params", "=", "{", "\"offset_factor\"", ":", "1", "}", "return", "te", ".", "decl_tensor_intrin", "(", "C", ".", "op", ",", "_intrin_func", ",", "binds", "=", "{", "data", ":", "a_buffer", ",", "kernel", ":", "b_buffer", "}", ",", "default_buffer_params", "=", "buffer_params", ",", ")" ]
https://github.com/apache/tvm/blob/6eb4ed813ebcdcd9558f0906a1870db8302ff1e0/python/tvm/topi/x86/tensor_intrin.py#L37-L149
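A quick way to see what this intrinsic computes, before any vectorization, is a plain NumPy model of the same reduction: each of the int32_lanes output lanes is a 4-element dot product of the uint8 data vector with one int8 row of the kernel, accumulated in int32. This is only an illustrative sketch of the te.compute definition above, not the pmaddubs/pmaddw LLVM path; the lane count of 16 assumes the AVX-512 case.

    import numpy as np

    int32_lanes = 16          # assumed AVX-512 lane count for the Skylake variant
    num_int8_elements = 4     # four int8 values packed per int32

    data = np.random.randint(0, 256, size=(num_int8_elements,), dtype=np.uint8)
    kernel = np.random.randint(-128, 128, size=(int32_lanes, num_int8_elements), dtype=np.int8)

    # Reference for C[i] = sum_k data[k] * kernel[i, k], widened to int32
    C = (kernel.astype(np.int32) * data.astype(np.int32)).sum(axis=1)
    print(C.shape)  # (16,)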
viniciuschiele/flask-apscheduler
5f878c41417824a4f37b353cb4b22973a81746f2
flask_apscheduler/scheduler.py
python
APScheduler.add_job
(self, id, func, **kwargs)
return self._scheduler.add_job(**job_def)
Add the given job to the job list and wakes up the scheduler if it's already running. :param str id: explicit identifier for the job (for modifying it later) :param func: callable (or a textual reference to one) to run at the given time
Add the given job to the job list and wakes up the scheduler if it's already running.
[ "Add", "the", "given", "job", "to", "the", "job", "list", "and", "wakes", "up", "the", "scheduler", "if", "it", "s", "already", "running", "." ]
def add_job(self, id, func, **kwargs): """ Add the given job to the job list and wakes up the scheduler if it's already running. :param str id: explicit identifier for the job (for modifying it later) :param func: callable (or a textual reference to one) to run at the given time """ job_def = dict(kwargs) job_def['id'] = id job_def['func'] = func job_def['name'] = job_def.get('name') or id fix_job_def(job_def) return self._scheduler.add_job(**job_def)
[ "def", "add_job", "(", "self", ",", "id", ",", "func", ",", "*", "*", "kwargs", ")", ":", "job_def", "=", "dict", "(", "kwargs", ")", "job_def", "[", "'id'", "]", "=", "id", "job_def", "[", "'func'", "]", "=", "func", "job_def", "[", "'name'", "]", "=", "job_def", ".", "get", "(", "'name'", ")", "or", "id", "fix_job_def", "(", "job_def", ")", "return", "self", ".", "_scheduler", ".", "add_job", "(", "*", "*", "job_def", ")" ]
https://github.com/viniciuschiele/flask-apscheduler/blob/5f878c41417824a4f37b353cb4b22973a81746f2/flask_apscheduler/scheduler.py#L153-L168
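A minimal usage sketch for the wrapper above, assuming a plain Flask app; the job id, function, and interval here are invented for illustration, and the trigger keyword arguments pass straight through to the underlying APScheduler:

    from flask import Flask
    from flask_apscheduler import APScheduler

    app = Flask(__name__)
    scheduler = APScheduler()
    scheduler.init_app(app)
    scheduler.start()

    def tick():
        print("tick")

    # 'id' names the job so it can be modified or removed later
    scheduler.add_job(id="tick_job", func=tick, trigger="interval", seconds=30)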
Tencent/bk-bcs-saas
2b437bf2f5fd5ce2078f7787c3a12df609f7679d
bcs-app/backend/container_service/clusters/base/utils.py
python
get_cluster_coes
(access_token, project_id, cluster_id)
return cluster["type"]
Get the cluster type. The cluster type cannot be modified after the cluster is created. TODO: should the result be cached to reduce API call latency?
Get the cluster type. The cluster type cannot be modified after the cluster is created. TODO: should the result be cached to reduce API call latency?
[ "Get", "the", "cluster", "type", ".", "The", "cluster", "type", "cannot", "be", "modified", "after", "the", "cluster", "is", "created", ".", "TODO", ":", "should", "the", "result", "be", "cached", "to", "reduce", "API", "call", "latency", "?" ]
def get_cluster_coes(access_token, project_id, cluster_id): """Get the cluster type. The cluster type cannot be modified after the cluster is created. TODO: should the result be cached to reduce API call latency? """ cluster = get_cluster(access_token, project_id, cluster_id) return cluster["type"]
[ "def", "get_cluster_coes", "(", "access_token", ",", "project_id", ",", "cluster_id", ")", ":", "cluster", "=", "get_cluster", "(", "access_token", ",", "project_id", ",", "cluster_id", ")", "return", "cluster", "[", "\"type\"", "]" ]
https://github.com/Tencent/bk-bcs-saas/blob/2b437bf2f5fd5ce2078f7787c3a12df609f7679d/bcs-app/backend/container_service/clusters/base/utils.py#L102-L107
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/signal/signaltools.py
python
correlate
(in1, in2, mode='full', method='auto')
r""" Cross-correlate two N-dimensional arrays. Cross-correlate `in1` and `in2`, with the output size determined by the `mode` argument. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear cross-correlation of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. In 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. method : str {'auto', 'direct', 'fft'}, optional A string indicating which method to use to calculate the correlation. ``direct`` The correlation is determined directly from sums, the definition of correlation. ``fft`` The Fast Fourier Transform is used to perform the correlation more quickly (only available for numerical arrays.) ``auto`` Automatically chooses direct or Fourier method based on an estimate of which is faster (default). See `convolve` Notes for more detail. .. versionadded:: 0.19.0 Returns ------- correlate : array An N-dimensional array containing a subset of the discrete linear cross-correlation of `in1` with `in2`. See Also -------- choose_conv_method : contains more documentation on `method`. Notes ----- The correlation z of two d-dimensional arrays x and y is defined as:: z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...]) This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')`` then .. math:: z[k] = (x * y)(k - N + 1) = \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*} for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2` where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`, and :math:`y_m` is 0 when m is outside the range of y. ``method='fft'`` only works for numerical arrays as it relies on `fftconvolve`. In certain cases (i.e., arrays of objects or when rounding integers can lose precision), ``method='direct'`` is always used. Examples -------- Implement a matched filter using cross-correlation, to recover a signal that has passed through a noisy channel. >>> from scipy import signal >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128) >>> sig_noise = sig + np.random.randn(len(sig)) >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128 >>> import matplotlib.pyplot as plt >>> clock = np.arange(64, len(sig), 128) >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True) >>> ax_orig.plot(sig) >>> ax_orig.plot(clock, sig[clock], 'ro') >>> ax_orig.set_title('Original signal') >>> ax_noise.plot(sig_noise) >>> ax_noise.set_title('Signal with noise') >>> ax_corr.plot(corr) >>> ax_corr.plot(clock, corr[clock], 'ro') >>> ax_corr.axhline(0.5, ls=':') >>> ax_corr.set_title('Cross-correlated with rectangular pulse') >>> ax_orig.margins(0, 0.1) >>> fig.tight_layout() >>> fig.show()
r""" Cross-correlate two N-dimensional arrays.
[ "r", "Cross", "-", "correlate", "two", "N", "-", "dimensional", "arrays", "." ]
def correlate(in1, in2, mode='full', method='auto'): r""" Cross-correlate two N-dimensional arrays. Cross-correlate `in1` and `in2`, with the output size determined by the `mode` argument. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear cross-correlation of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. In 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. method : str {'auto', 'direct', 'fft'}, optional A string indicating which method to use to calculate the correlation. ``direct`` The correlation is determined directly from sums, the definition of correlation. ``fft`` The Fast Fourier Transform is used to perform the correlation more quickly (only available for numerical arrays.) ``auto`` Automatically chooses direct or Fourier method based on an estimate of which is faster (default). See `convolve` Notes for more detail. .. versionadded:: 0.19.0 Returns ------- correlate : array An N-dimensional array containing a subset of the discrete linear cross-correlation of `in1` with `in2`. See Also -------- choose_conv_method : contains more documentation on `method`. Notes ----- The correlation z of two d-dimensional arrays x and y is defined as:: z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...]) This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')`` then .. math:: z[k] = (x * y)(k - N + 1) = \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*} for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2` where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`, and :math:`y_m` is 0 when m is outside the range of y. ``method='fft'`` only works for numerical arrays as it relies on `fftconvolve`. In certain cases (i.e., arrays of objects or when rounding integers can lose precision), ``method='direct'`` is always used. Examples -------- Implement a matched filter using cross-correlation, to recover a signal that has passed through a noisy channel. 
>>> from scipy import signal >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128) >>> sig_noise = sig + np.random.randn(len(sig)) >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128 >>> import matplotlib.pyplot as plt >>> clock = np.arange(64, len(sig), 128) >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True) >>> ax_orig.plot(sig) >>> ax_orig.plot(clock, sig[clock], 'ro') >>> ax_orig.set_title('Original signal') >>> ax_noise.plot(sig_noise) >>> ax_noise.set_title('Signal with noise') >>> ax_corr.plot(corr) >>> ax_corr.plot(clock, corr[clock], 'ro') >>> ax_corr.axhline(0.5, ls=':') >>> ax_corr.set_title('Cross-correlated with rectangular pulse') >>> ax_orig.margins(0, 0.1) >>> fig.tight_layout() >>> fig.show() """ in1 = asarray(in1) in2 = asarray(in2) if in1.ndim == in2.ndim == 0: return in1 * in2.conj() elif in1.ndim != in2.ndim: raise ValueError("in1 and in2 should have the same dimensionality") # Don't use _valfrommode, since correlate should not accept numeric modes try: val = _modedict[mode] except KeyError: raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.") # this either calls fftconvolve or this function with method=='direct' if method in ('fft', 'auto'): return convolve(in1, _reverse_and_conj(in2), mode, method) elif method == 'direct': # fastpath to faster numpy.correlate for 1d inputs when possible if _np_conv_ok(in1, in2, mode): return np.correlate(in1, in2, mode) # _correlateND is far slower when in2.size > in1.size, so swap them # and then undo the effect afterward if mode == 'full'. Also, it fails # with 'valid' mode if in2 is larger than in1, so swap those, too. # Don't swap inputs for 'same' mode, since shape of in1 matters. swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or _inputs_swap_needed(mode, in1.shape, in2.shape)) if swapped_inputs: in1, in2 = in2, in1 if mode == 'valid': ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)] out = np.empty(ps, in1.dtype) z = sigtools._correlateND(in1, in2, out, val) else: ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)] # zero pad input in1zpadded = np.zeros(ps, in1.dtype) sc = tuple(slice(0, i) for i in in1.shape) in1zpadded[sc] = in1.copy() if mode == 'full': out = np.empty(ps, in1.dtype) elif mode == 'same': out = np.empty(in1.shape, in1.dtype) z = sigtools._correlateND(in1zpadded, in2, out, val) if swapped_inputs: # Reverse and conjugate to undo the effect of swapping inputs z = _reverse_and_conj(z) return z else: raise ValueError("Acceptable method flags are 'auto'," " 'direct', or 'fft'.")
[ "def", "correlate", "(", "in1", ",", "in2", ",", "mode", "=", "'full'", ",", "method", "=", "'auto'", ")", ":", "in1", "=", "asarray", "(", "in1", ")", "in2", "=", "asarray", "(", "in2", ")", "if", "in1", ".", "ndim", "==", "in2", ".", "ndim", "==", "0", ":", "return", "in1", "*", "in2", ".", "conj", "(", ")", "elif", "in1", ".", "ndim", "!=", "in2", ".", "ndim", ":", "raise", "ValueError", "(", "\"in1 and in2 should have the same dimensionality\"", ")", "# Don't use _valfrommode, since correlate should not accept numeric modes", "try", ":", "val", "=", "_modedict", "[", "mode", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Acceptable mode flags are 'valid',\"", "\" 'same', or 'full'.\"", ")", "# this either calls fftconvolve or this function with method=='direct'", "if", "method", "in", "(", "'fft'", ",", "'auto'", ")", ":", "return", "convolve", "(", "in1", ",", "_reverse_and_conj", "(", "in2", ")", ",", "mode", ",", "method", ")", "elif", "method", "==", "'direct'", ":", "# fastpath to faster numpy.correlate for 1d inputs when possible", "if", "_np_conv_ok", "(", "in1", ",", "in2", ",", "mode", ")", ":", "return", "np", ".", "correlate", "(", "in1", ",", "in2", ",", "mode", ")", "# _correlateND is far slower when in2.size > in1.size, so swap them", "# and then undo the effect afterward if mode == 'full'. Also, it fails", "# with 'valid' mode if in2 is larger than in1, so swap those, too.", "# Don't swap inputs for 'same' mode, since shape of in1 matters.", "swapped_inputs", "=", "(", "(", "mode", "==", "'full'", ")", "and", "(", "in2", ".", "size", ">", "in1", ".", "size", ")", "or", "_inputs_swap_needed", "(", "mode", ",", "in1", ".", "shape", ",", "in2", ".", "shape", ")", ")", "if", "swapped_inputs", ":", "in1", ",", "in2", "=", "in2", ",", "in1", "if", "mode", "==", "'valid'", ":", "ps", "=", "[", "i", "-", "j", "+", "1", "for", "i", ",", "j", "in", "zip", "(", "in1", ".", "shape", ",", "in2", ".", "shape", ")", "]", "out", "=", "np", ".", "empty", "(", "ps", ",", "in1", ".", "dtype", ")", "z", "=", "sigtools", ".", "_correlateND", "(", "in1", ",", "in2", ",", "out", ",", "val", ")", "else", ":", "ps", "=", "[", "i", "+", "j", "-", "1", "for", "i", ",", "j", "in", "zip", "(", "in1", ".", "shape", ",", "in2", ".", "shape", ")", "]", "# zero pad input", "in1zpadded", "=", "np", ".", "zeros", "(", "ps", ",", "in1", ".", "dtype", ")", "sc", "=", "tuple", "(", "slice", "(", "0", ",", "i", ")", "for", "i", "in", "in1", ".", "shape", ")", "in1zpadded", "[", "sc", "]", "=", "in1", ".", "copy", "(", ")", "if", "mode", "==", "'full'", ":", "out", "=", "np", ".", "empty", "(", "ps", ",", "in1", ".", "dtype", ")", "elif", "mode", "==", "'same'", ":", "out", "=", "np", ".", "empty", "(", "in1", ".", "shape", ",", "in1", ".", "dtype", ")", "z", "=", "sigtools", ".", "_correlateND", "(", "in1zpadded", ",", "in2", ",", "out", ",", "val", ")", "if", "swapped_inputs", ":", "# Reverse and conjugate to undo the effect of swapping inputs", "z", "=", "_reverse_and_conj", "(", "z", ")", "return", "z", "else", ":", "raise", "ValueError", "(", "\"Acceptable method flags are 'auto',\"", "\" 'direct', or 'fft'.\"", ")" ]
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/signal/signaltools.py#L105-L269
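The method='fft' branch above leans on the identity that cross-correlation equals convolution with a reversed, conjugated second input, which is exactly what _reverse_and_conj provides; a small numeric check of that identity:

    import numpy as np
    from scipy import signal

    x = np.random.randn(64)
    y = np.random.randn(16)

    # correlate(x, y) == convolve(x, y[::-1].conj()) in 'full' mode
    a = signal.correlate(x, y, mode="full", method="direct")
    b = signal.convolve(x, y[::-1].conj(), mode="full")
    print(np.allclose(a, b))  # True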
TheAlgorithms/Python
9af2eef9b3761bf51580dedfb6fa7136ca0c5c2c
ciphers/porta_cipher.py
python
generate_table
(key: str)
return [alphabet[char] for char in key.upper()]
>>> generate_table('marvin') # doctest: +NORMALIZE_WHITESPACE [('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'), ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')]
>>> generate_table('marvin') # doctest: +NORMALIZE_WHITESPACE [('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'), ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')]
[ ">>>", "generate_table", "(", "marvin", ")", "#", "doctest", ":", "+", "NORMALIZE_WHITESPACE", "[", "(", "ABCDEFGHIJKLM", "UVWXYZNOPQRST", ")", "(", "ABCDEFGHIJKLM", "NOPQRSTUVWXYZ", ")", "(", "ABCDEFGHIJKLM", "STUVWXYZNOPQR", ")", "(", "ABCDEFGHIJKLM", "QRSTUVWXYZNOP", ")", "(", "ABCDEFGHIJKLM", "WXYZNOPQRSTUV", ")", "(", "ABCDEFGHIJKLM", "UVWXYZNOPQRST", ")", "]" ]
def generate_table(key: str) -> list[tuple[str, str]]: """ >>> generate_table('marvin') # doctest: +NORMALIZE_WHITESPACE [('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'), ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')] """ return [alphabet[char] for char in key.upper()]
[ "def", "generate_table", "(", "key", ":", "str", ")", "->", "list", "[", "tuple", "[", "str", ",", "str", "]", "]", ":", "return", "[", "alphabet", "[", "char", "]", "for", "char", "in", "key", ".", "upper", "(", ")", "]" ]
https://github.com/TheAlgorithms/Python/blob/9af2eef9b3761bf51580dedfb6fa7136ca0c5c2c/ciphers/porta_cipher.py#L31-L38
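Each (left, right) pair returned by generate_table is one reciprocal substitution row of the Porta tableau; a sketch of how a single row maps a letter (the substitute helper below is hypothetical, written to mirror the module's lookup, and is not part of the source):

    def substitute(pair, ch):
        # Porta rows are reciprocal: a letter in the left half maps to the
        # right half at the same index, and vice versa.
        left, right = pair
        if ch in left:
            return right[left.index(ch)]
        return left[right.index(ch)]

    row = ("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ")  # the row for key letters A/B
    print(substitute(row, "A"))  # N
    print(substitute(row, "N"))  # A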
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/sdb/cache.py
python
set_
(key, value, service=None, profile=None)
return get(key, service, profile)
Set a key/value pair in the cache service
Set a key/value pair in the cache service
[ "Set", "a", "key", "/", "value", "pair", "in", "the", "cache", "service" ]
def set_(key, value, service=None, profile=None): # pylint: disable=W0613 """ Set a key/value pair in the cache service """ key, profile = _parse_key(key, profile) cache = salt.cache.Cache(__opts__) cache.store(profile["bank"], key, value) return get(key, service, profile)
[ "def", "set_", "(", "key", ",", "value", ",", "service", "=", "None", ",", "profile", "=", "None", ")", ":", "# pylint: disable=W0613", "key", ",", "profile", "=", "_parse_key", "(", "key", ",", "profile", ")", "cache", "=", "salt", ".", "cache", ".", "Cache", "(", "__opts__", ")", "cache", ".", "store", "(", "profile", "[", "\"bank\"", "]", ",", "key", ",", "value", ")", "return", "get", "(", "key", ",", "service", ",", "profile", ")" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/sdb/cache.py#L62-L69
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/plat-mac/Carbon/Menus.py
python
FOUR_CHAR_CODE
(x)
return x
[]
def FOUR_CHAR_CODE(x): return x
[ "def", "FOUR_CHAR_CODE", "(", "x", ")", ":", "return", "x" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/plat-mac/Carbon/Menus.py#L3-L3
garydoranjr/misvm
b2118fe04d98c00436bdf8a0e4bbfb6082c5751c
misvm/misssvm.py
python
MissSVM.fit
(self, bags, y)
@param bags : a sequence of n bags; each bag is an m-by-k array-like object containing m instances with k features @param y : an array-like object of length n containing -1/+1 labels
[]
def fit(self, bags, y): """ @param bags : a sequence of n bags; each bag is an m-by-k array-like object containing m instances with k features @param y : an array-like object of length n containing -1/+1 labels """ self._bags = list(map(np.asmatrix, bags)) bs = BagSplitter(self._bags, np.asmatrix(y).reshape((-1, 1))) self._X = np.vstack([bs.pos_instances, bs.pos_instances, bs.pos_instances, bs.neg_instances]) self._y = np.vstack([np.matrix(np.ones((bs.X_p + bs.L_p, 1))), -np.matrix(np.ones((bs.L_p + bs.L_n, 1)))]) if self.scale_C: C = self.C / float(len(self._bags)) else: C = self.C # Setup SVM and adjust constraints _, _, f, A, b, lb, ub = self._setup_svm(self._y, self._y, C) ub[:bs.X_p] *= (float(bs.L_n) / float(bs.X_p)) ub[bs.X_p: bs.X_p + 2 * bs.L_p] *= (float(bs.L_n) / float(bs.L_p)) K = kernel_by_name(self.kernel, gamma=self.gamma, p=self.p)(self._X, self._X) D = spdiag(self._y) ub0 = np.matrix(ub) ub0[bs.X_p: bs.X_p + 2 * bs.L_p] *= 0.5 def get_V(pos_classifications): eye_n = bs.L_n + 2 * bs.L_p top = np.zeros((bs.X_p, bs.L_p)) for row, (i, j) in enumerate(slices(bs.pos_groups)): top[row, i:j] = _grad_softmin(-pos_classifications[i:j], self.alpha).flat return sp.bmat([[sp.coo_matrix(top), None], [None, sp.eye(eye_n, eye_n)]]) V0 = get_V(np.matrix(np.zeros((bs.L_p, 1)))) qp = IterativeQP(D * V0 * K * V0.T * D, f, A, b, lb, ub0) best_obj = float('inf') best_svm = None for rr in range(self.restarts + 1): if rr == 0: if self.verbose: print('Non-random start...') # Train on instances alphas, obj = qp.solve(self.verbose) else: if self.verbose: print('Random restart %d of %d...' % (rr, self.restarts)) alphas = np.matrix([uniform(0.0, 1.0) for i in range(len(lb))]).T obj = Objective(0.0, 0.0) svm = MICA(kernel=self.kernel, gamma=self.gamma, p=self.p, verbose=self.verbose, sv_cutoff=self.sv_cutoff) svm._X = self._X svm._y = self._y svm._V = V0 svm._alphas = alphas svm._objective = obj svm._compute_separator(K) svm._K = K class missCCCP(CCCP): def bailout(cself, svm, obj_val): return svm def iterate(cself, svm, obj_val): cself.mention('Linearizing constraints...') classifications = svm._predictions[bs.X_p: bs.X_p + bs.L_p] V = get_V(classifications) cself.mention('Computing slacks...') # Difference is [1 - y_i*(w*phi(x_i) + b)] pos_differences = 1.0 - classifications neg_differences = 1.0 + classifications # Slacks are positive differences only pos_slacks = np.multiply(pos_differences > 0, pos_differences) neg_slacks = np.multiply(neg_differences > 0, neg_differences) all_slacks = np.hstack([pos_slacks, neg_slacks]) cself.mention('Linearizing...') # Compute gradient across pairs slack_grads = np.vstack([_grad_softmin(pair, self.alpha) for pair in all_slacks]) # Stack results into one column slack_grads = np.vstack([np.ones((bs.X_p, 1)), slack_grads[:, 0], slack_grads[:, 1], np.ones((bs.L_n, 1))]) # Update QP qp.update_H(D * V * K * V.T * D) qp.update_ub(np.multiply(ub, slack_grads)) # Re-solve cself.mention('Solving QP...') alphas, obj = qp.solve(self.verbose) new_svm = MICA(kernel=self.kernel, gamma=self.gamma, p=self.p, verbose=self.verbose, sv_cutoff=self.sv_cutoff) new_svm._X = self._X new_svm._y = self._y new_svm._V = V new_svm._alphas = alphas new_svm._objective = obj new_svm._compute_separator(K) new_svm._K = K if cself.check_tolerance(obj_val, obj): return None, new_svm return {'svm': new_svm, 'obj_val': obj}, None cccp = missCCCP(verbose=self.verbose, svm=svm, obj_val=None, max_iters=self.max_iters) svm = cccp.solve() if svm is not None: obj = float(svm._objective) if obj < best_obj: 
best_svm = svm best_obj = obj if best_svm is not None: self._V = best_svm._V self._alphas = best_svm._alphas self._objective = best_svm._objective self._compute_separator(best_svm._K) self._bag_predictions = self.predict(self._bags)
[ "def", "fit", "(", "self", ",", "bags", ",", "y", ")", ":", "self", ".", "_bags", "=", "list", "(", "map", "(", "np", ".", "asmatrix", ",", "bags", ")", ")", "bs", "=", "BagSplitter", "(", "self", ".", "_bags", ",", "np", ".", "asmatrix", "(", "y", ")", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", ")", "self", ".", "_X", "=", "np", ".", "vstack", "(", "[", "bs", ".", "pos_instances", ",", "bs", ".", "pos_instances", ",", "bs", ".", "pos_instances", ",", "bs", ".", "neg_instances", "]", ")", "self", ".", "_y", "=", "np", ".", "vstack", "(", "[", "np", ".", "matrix", "(", "np", ".", "ones", "(", "(", "bs", ".", "X_p", "+", "bs", ".", "L_p", ",", "1", ")", ")", ")", ",", "-", "np", ".", "matrix", "(", "np", ".", "ones", "(", "(", "bs", ".", "L_p", "+", "bs", ".", "L_n", ",", "1", ")", ")", ")", "]", ")", "if", "self", ".", "scale_C", ":", "C", "=", "self", ".", "C", "/", "float", "(", "len", "(", "self", ".", "_bags", ")", ")", "else", ":", "C", "=", "self", ".", "C", "# Setup SVM and adjust constraints", "_", ",", "_", ",", "f", ",", "A", ",", "b", ",", "lb", ",", "ub", "=", "self", ".", "_setup_svm", "(", "self", ".", "_y", ",", "self", ".", "_y", ",", "C", ")", "ub", "[", ":", "bs", ".", "X_p", "]", "*=", "(", "float", "(", "bs", ".", "L_n", ")", "/", "float", "(", "bs", ".", "X_p", ")", ")", "ub", "[", "bs", ".", "X_p", ":", "bs", ".", "X_p", "+", "2", "*", "bs", ".", "L_p", "]", "*=", "(", "float", "(", "bs", ".", "L_n", ")", "/", "float", "(", "bs", ".", "L_p", ")", ")", "K", "=", "kernel_by_name", "(", "self", ".", "kernel", ",", "gamma", "=", "self", ".", "gamma", ",", "p", "=", "self", ".", "p", ")", "(", "self", ".", "_X", ",", "self", ".", "_X", ")", "D", "=", "spdiag", "(", "self", ".", "_y", ")", "ub0", "=", "np", ".", "matrix", "(", "ub", ")", "ub0", "[", "bs", ".", "X_p", ":", "bs", ".", "X_p", "+", "2", "*", "bs", ".", "L_p", "]", "*=", "0.5", "def", "get_V", "(", "pos_classifications", ")", ":", "eye_n", "=", "bs", ".", "L_n", "+", "2", "*", "bs", ".", "L_p", "top", "=", "np", ".", "zeros", "(", "(", "bs", ".", "X_p", ",", "bs", ".", "L_p", ")", ")", "for", "row", ",", "(", "i", ",", "j", ")", "in", "enumerate", "(", "slices", "(", "bs", ".", "pos_groups", ")", ")", ":", "top", "[", "row", ",", "i", ":", "j", "]", "=", "_grad_softmin", "(", "-", "pos_classifications", "[", "i", ":", "j", "]", ",", "self", ".", "alpha", ")", ".", "flat", "return", "sp", ".", "bmat", "(", "[", "[", "sp", ".", "coo_matrix", "(", "top", ")", ",", "None", "]", ",", "[", "None", ",", "sp", ".", "eye", "(", "eye_n", ",", "eye_n", ")", "]", "]", ")", "V0", "=", "get_V", "(", "np", ".", "matrix", "(", "np", ".", "zeros", "(", "(", "bs", ".", "L_p", ",", "1", ")", ")", ")", ")", "qp", "=", "IterativeQP", "(", "D", "*", "V0", "*", "K", "*", "V0", ".", "T", "*", "D", ",", "f", ",", "A", ",", "b", ",", "lb", ",", "ub0", ")", "best_obj", "=", "float", "(", "'inf'", ")", "best_svm", "=", "None", "for", "rr", "in", "range", "(", "self", ".", "restarts", "+", "1", ")", ":", "if", "rr", "==", "0", ":", "if", "self", ".", "verbose", ":", "print", "(", "'Non-random start...'", ")", "# Train on instances", "alphas", ",", "obj", "=", "qp", ".", "solve", "(", "self", ".", "verbose", ")", "else", ":", "if", "self", ".", "verbose", ":", "print", "(", "'Random restart %d of %d...'", "%", "(", "rr", ",", "self", ".", "restarts", ")", ")", "alphas", "=", "np", ".", "matrix", "(", "[", "uniform", "(", "0.0", ",", "1.0", ")", "for", "i", "in", "range", "(", "len", "(", "lb", ")", ")", "]", ")", ".", "T", 
"obj", "=", "Objective", "(", "0.0", ",", "0.0", ")", "svm", "=", "MICA", "(", "kernel", "=", "self", ".", "kernel", ",", "gamma", "=", "self", ".", "gamma", ",", "p", "=", "self", ".", "p", ",", "verbose", "=", "self", ".", "verbose", ",", "sv_cutoff", "=", "self", ".", "sv_cutoff", ")", "svm", ".", "_X", "=", "self", ".", "_X", "svm", ".", "_y", "=", "self", ".", "_y", "svm", ".", "_V", "=", "V0", "svm", ".", "_alphas", "=", "alphas", "svm", ".", "_objective", "=", "obj", "svm", ".", "_compute_separator", "(", "K", ")", "svm", ".", "_K", "=", "K", "class", "missCCCP", "(", "CCCP", ")", ":", "def", "bailout", "(", "cself", ",", "svm", ",", "obj_val", ")", ":", "return", "svm", "def", "iterate", "(", "cself", ",", "svm", ",", "obj_val", ")", ":", "cself", ".", "mention", "(", "'Linearizing constraints...'", ")", "classifications", "=", "svm", ".", "_predictions", "[", "bs", ".", "X_p", ":", "bs", ".", "X_p", "+", "bs", ".", "L_p", "]", "V", "=", "get_V", "(", "classifications", ")", "cself", ".", "mention", "(", "'Computing slacks...'", ")", "# Difference is [1 - y_i*(w*phi(x_i) + b)]", "pos_differences", "=", "1.0", "-", "classifications", "neg_differences", "=", "1.0", "+", "classifications", "# Slacks are positive differences only", "pos_slacks", "=", "np", ".", "multiply", "(", "pos_differences", ">", "0", ",", "pos_differences", ")", "neg_slacks", "=", "np", ".", "multiply", "(", "neg_differences", ">", "0", ",", "neg_differences", ")", "all_slacks", "=", "np", ".", "hstack", "(", "[", "pos_slacks", ",", "neg_slacks", "]", ")", "cself", ".", "mention", "(", "'Linearizing...'", ")", "# Compute gradient across pairs", "slack_grads", "=", "np", ".", "vstack", "(", "[", "_grad_softmin", "(", "pair", ",", "self", ".", "alpha", ")", "for", "pair", "in", "all_slacks", "]", ")", "# Stack results into one column", "slack_grads", "=", "np", ".", "vstack", "(", "[", "np", ".", "ones", "(", "(", "bs", ".", "X_p", ",", "1", ")", ")", ",", "slack_grads", "[", ":", ",", "0", "]", ",", "slack_grads", "[", ":", ",", "1", "]", ",", "np", ".", "ones", "(", "(", "bs", ".", "L_n", ",", "1", ")", ")", "]", ")", "# Update QP", "qp", ".", "update_H", "(", "D", "*", "V", "*", "K", "*", "V", ".", "T", "*", "D", ")", "qp", ".", "update_ub", "(", "np", ".", "multiply", "(", "ub", ",", "slack_grads", ")", ")", "# Re-solve", "cself", ".", "mention", "(", "'Solving QP...'", ")", "alphas", ",", "obj", "=", "qp", ".", "solve", "(", "self", ".", "verbose", ")", "new_svm", "=", "MICA", "(", "kernel", "=", "self", ".", "kernel", ",", "gamma", "=", "self", ".", "gamma", ",", "p", "=", "self", ".", "p", ",", "verbose", "=", "self", ".", "verbose", ",", "sv_cutoff", "=", "self", ".", "sv_cutoff", ")", "new_svm", ".", "_X", "=", "self", ".", "_X", "new_svm", ".", "_y", "=", "self", ".", "_y", "new_svm", ".", "_V", "=", "V", "new_svm", ".", "_alphas", "=", "alphas", "new_svm", ".", "_objective", "=", "obj", "new_svm", ".", "_compute_separator", "(", "K", ")", "new_svm", ".", "_K", "=", "K", "if", "cself", ".", "check_tolerance", "(", "obj_val", ",", "obj", ")", ":", "return", "None", ",", "new_svm", "return", "{", "'svm'", ":", "new_svm", ",", "'obj_val'", ":", "obj", "}", ",", "None", "cccp", "=", "missCCCP", "(", "verbose", "=", "self", ".", "verbose", ",", "svm", "=", "svm", ",", "obj_val", "=", "None", ",", "max_iters", "=", "self", ".", "max_iters", ")", "svm", "=", "cccp", ".", "solve", "(", ")", "if", "svm", "is", "not", "None", ":", "obj", "=", "float", "(", "svm", ".", "_objective", ")", "if", "obj", "<", 
"best_obj", ":", "best_svm", "=", "svm", "best_obj", "=", "obj", "if", "best_svm", "is", "not", "None", ":", "self", ".", "_V", "=", "best_svm", ".", "_V", "self", ".", "_alphas", "=", "best_svm", ".", "_alphas", "self", ".", "_objective", "=", "best_svm", ".", "_objective", "self", ".", "_compute_separator", "(", "best_svm", ".", "_K", ")", "self", ".", "_bag_predictions", "=", "self", ".", "predict", "(", "self", ".", "_bags", ")" ]
https://github.com/garydoranjr/misvm/blob/b2118fe04d98c00436bdf8a0e4bbfb6082c5751c/misvm/misssvm.py#L45-L173
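A minimal fitting sketch under assumed shapes: each bag is an m_i-by-k array and each label is -1/+1 per bag. The constructor arguments shown are plausible values, not defaults taken from the source, and the toy data is random:

    import numpy as np
    import misvm  # assumes the misvm package is installed

    # Three toy bags with 2 features each; the +1 bag is shifted so it
    # plausibly contains positive instances under the MI assumption.
    bags = [np.random.randn(5, 2), np.random.randn(3, 2) + 2.0, np.random.randn(4, 2)]
    y = np.array([-1.0, 1.0, -1.0])

    clf = misvm.MissSVM(kernel="linear", C=1.0, max_iters=10)
    clf.fit(bags, y)
    print(clf.predict(bags))  # real-valued bag scores; the sign gives the label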
HymanLiuTS/flaskTs
286648286976e85d9b9a5873632331efcafe0b21
flasky/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/base.py
python
MSSQLCompiler.visit_binary
(self, binary, **kwargs)
return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
Move bind parameters to the right-hand side of an operator, where possible.
Move bind parameters to the right-hand side of an operator, where possible.
[ "Move", "bind", "parameters", "to", "the", "right", "-", "hand", "side", "of", "an", "operator", "where", "possible", "." ]
def visit_binary(self, binary, **kwargs): """Move bind parameters to the right-hand side of an operator, where possible. """ if ( isinstance(binary.left, expression.BindParameter) and binary.operator == operator.eq and not isinstance(binary.right, expression.BindParameter) ): return self.process( expression.BinaryExpression(binary.right, binary.left, binary.operator), **kwargs) return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
[ "def", "visit_binary", "(", "self", ",", "binary", ",", "*", "*", "kwargs", ")", ":", "if", "(", "isinstance", "(", "binary", ".", "left", ",", "expression", ".", "BindParameter", ")", "and", "binary", ".", "operator", "==", "operator", ".", "eq", "and", "not", "isinstance", "(", "binary", ".", "right", ",", "expression", ".", "BindParameter", ")", ")", ":", "return", "self", ".", "process", "(", "expression", ".", "BinaryExpression", "(", "binary", ".", "right", ",", "binary", ".", "left", ",", "binary", ".", "operator", ")", ",", "*", "*", "kwargs", ")", "return", "super", "(", "MSSQLCompiler", ",", "self", ")", ".", "visit_binary", "(", "binary", ",", "*", "*", "kwargs", ")" ]
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/base.py#L1287-L1302
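The effect is easiest to see by compiling a comparison whose bind parameter starts on the left; under the mssql dialect it should come out on the right (a sketch; the column name is invented and the exact parameter marker depends on the paramstyle in use):

    from sqlalchemy import column, literal
    from sqlalchemy.dialects import mssql

    expr = literal(5) == column("x")  # BindParameter on the left-hand side
    print(expr.compile(dialect=mssql.dialect()))
    # Expected output along the lines of: x = :param_1  (operands swapped)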
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/site-packages/pygments-2.0.2-py3.3.egg/pygments/scanner.py
python
Scanner.test
(self, pattern)
return self.check(pattern) is not None
Apply a pattern on the current position and check if it matches. Doesn't touch pos.
Apply a pattern on the current position and check if it matches. Doesn't touch pos.
[ "Apply", "a", "pattern", "on", "the", "current", "position", "and", "check", "if", "it", "matches", ".", "Doesn", "t", "touch", "pos", "." ]
def test(self, pattern): """Apply a pattern on the current position and check if it matches. Doesn't touch pos.""" return self.check(pattern) is not None
[ "def", "test", "(", "self", ",", "pattern", ")", ":", "return", "self", ".", "check", "(", "pattern", ")", "is", "not", "None" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/pygments-2.0.2-py3.3.egg/pygments/scanner.py#L67-L70
ilius/pyglossary
d599b3beda3ae17642af5debd83bb991148e6425
pyglossary/plugin_lib/py38/gzip_no_crc.py
python
GzipFile.filename
(self)
return self.name
[]
def filename(self): import warnings warnings.warn("use the name attribute", DeprecationWarning, 2) if self.mode == WRITE and self.name[-3:] != ".gz": return self.name + ".gz" return self.name
[ "def", "filename", "(", "self", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"use the name attribute\"", ",", "DeprecationWarning", ",", "2", ")", "if", "self", ".", "mode", "==", "WRITE", "and", "self", ".", "name", "[", "-", "3", ":", "]", "!=", "\".gz\"", ":", "return", "self", ".", "name", "+", "\".gz\"", "return", "self", ".", "name" ]
https://github.com/ilius/pyglossary/blob/d599b3beda3ae17642af5debd83bb991148e6425/pyglossary/plugin_lib/py38/gzip_no_crc.py#L200-L205
GothicAi/Instaboost
b6f80405b8706adad4aca1c1bdbb650b9c1c71e5
mmdetection/mmdet/core/bbox/samplers/random_sampler.py
python
RandomSampler._sample_pos
(self, assign_result, num_expected, **kwargs)
Randomly sample some positive samples.
Randomly sample some positive samples.
[ "Randomly", "sample", "some", "positive", "samples", "." ]
def _sample_pos(self, assign_result, num_expected, **kwargs): """Randomly sample some positive samples.""" pos_inds = torch.nonzero(assign_result.gt_inds > 0) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: return self.random_choice(pos_inds, num_expected)
[ "def", "_sample_pos", "(", "self", ",", "assign_result", ",", "num_expected", ",", "*", "*", "kwargs", ")", ":", "pos_inds", "=", "torch", ".", "nonzero", "(", "assign_result", ".", "gt_inds", ">", "0", ")", "if", "pos_inds", ".", "numel", "(", ")", "!=", "0", ":", "pos_inds", "=", "pos_inds", ".", "squeeze", "(", "1", ")", "if", "pos_inds", ".", "numel", "(", ")", "<=", "num_expected", ":", "return", "pos_inds", "else", ":", "return", "self", ".", "random_choice", "(", "pos_inds", ",", "num_expected", ")" ]
https://github.com/GothicAi/Instaboost/blob/b6f80405b8706adad4aca1c1bdbb650b9c1c71e5/mmdetection/mmdet/core/bbox/samplers/random_sampler.py#L35-L43
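The sampling logic is easy to mimic standalone: collect the indices of positive assignments and subsample only when there are more than expected. A self-contained sketch with made-up assignment results, not the mmdet class itself:

    import torch

    gt_inds = torch.tensor([0, 2, 0, 1, 3, 0, 1])  # 0 = negative, >0 = matched gt
    num_expected = 2

    pos_inds = torch.nonzero(gt_inds > 0).squeeze(1)
    if pos_inds.numel() > num_expected:
        perm = torch.randperm(pos_inds.numel())[:num_expected]
        pos_inds = pos_inds[perm]
    print(pos_inds)  # at most num_expected positive indices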
out0fmemory/GoAgent-Always-Available
c4254984fea633ce3d1893fe5901debd9f22c2a9
server/lib/google/appengine/_internal/antlr3/streams.py
python
TokenRewriteStream.rollback
(self, *args)
Roll back the instruction stream for a program so that the indicated instruction (via instructionIndex) is no longer in the stream. UNTESTED!
Roll back the instruction stream for a program so that the indicated instruction (via instructionIndex) is no longer in the stream. UNTESTED!
[ "Roll", "back", "the", "instruction", "stream", "for", "a", "program", "so", "that", "the", "indicated", "instruction", "(", "via", "instructionIndex", ")", "is", "no", "longer", "in", "the", "stream", ".", "UNTESTED!" ]
def rollback(self, *args): """ Roll back the instruction stream for a program so that the indicated instruction (via instructionIndex) is no longer in the stream. UNTESTED! """ if len(args) == 2: programName = args[0] instructionIndex = args[1] elif len(args) == 1: programName = self.DEFAULT_PROGRAM_NAME instructionIndex = args[0] else: raise TypeError("Invalid arguments") p = self.programs.get(programName, None) if p is not None: self.programs[programName] = ( p[self.MIN_TOKEN_INDEX:instructionIndex])
[ "def", "rollback", "(", "self", ",", "*", "args", ")", ":", "if", "len", "(", "args", ")", "==", "2", ":", "programName", "=", "args", "[", "0", "]", "instructionIndex", "=", "args", "[", "1", "]", "elif", "len", "(", "args", ")", "==", "1", ":", "programName", "=", "self", ".", "DEFAULT_PROGRAM_NAME", "instructionIndex", "=", "args", "[", "0", "]", "else", ":", "raise", "TypeError", "(", "\"Invalid arguments\"", ")", "p", "=", "self", ".", "programs", ".", "get", "(", "programName", ",", "None", ")", "if", "p", "is", "not", "None", ":", "self", ".", "programs", "[", "programName", "]", "=", "(", "p", "[", "self", ".", "MIN_TOKEN_INDEX", ":", "instructionIndex", "]", ")" ]
https://github.com/out0fmemory/GoAgent-Always-Available/blob/c4254984fea633ce3d1893fe5901debd9f22c2a9/server/lib/google/appengine/_internal/antlr3/streams.py#L1066-L1085
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/common/lib/python2.7/site-packages/libxml2.py
python
relaxNGInitTypes
()
return ret
Initialize the default type libraries.
Initialize the default type libraries.
[ "Initialize", "the", "default", "type", "libraries", "." ]
def relaxNGInitTypes(): """Initialize the default type libraries. """ ret = libxml2mod.xmlRelaxNGInitTypes() return ret
[ "def", "relaxNGInitTypes", "(", ")", ":", "ret", "=", "libxml2mod", ".", "xmlRelaxNGInitTypes", "(", ")", "return", "ret" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/common/lib/python2.7/site-packages/libxml2.py#L1574-L1577
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/combinat/root_system/type_dual.py
python
CartanType.dual
(self)
return self._type
EXAMPLES:: sage: ct = CartanType(['F', 4, 1]).dual() sage: ct.dual() ['F', 4, 1]
EXAMPLES::
[ "EXAMPLES", "::" ]
def dual(self): """ EXAMPLES:: sage: ct = CartanType(['F', 4, 1]).dual() sage: ct.dual() ['F', 4, 1] """ return self._type
[ "def", "dual", "(", "self", ")", ":", "return", "self", ".", "_type" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/root_system/type_dual.py#L324-L332
numenta/nupic
b9ebedaf54f49a33de22d8d44dff7c765cdb5548
external/linux32/lib/python2.6/site-packages/matplotlib/pyplot.py
python
jet
()
Set the default colormap to jet and apply it to the current image, if any. See help(colormaps) for more information.
Set the default colormap to jet and apply it to the current image, if any. See help(colormaps) for more information.
[ "Set", "the", "default", "colormap", "to", "jet", "and", "apply", "it", "to", "the", "current", "image", ",", "if", "any", ".", "See", "help", "(", "colormaps", ")", "for", "more", "information", "." ]
def jet(): ''' Set the default colormap to jet and apply it to the current image, if any. See help(colormaps) for more information. ''' rc('image', cmap='jet') im = gci() if im is not None: im.set_cmap(cm.jet) draw_if_interactive()
[ "def", "jet", "(", ")", ":", "rc", "(", "'image'", ",", "cmap", "=", "'jet'", ")", "im", "=", "gci", "(", ")", "if", "im", "is", "not", "None", ":", "im", ".", "set_cmap", "(", "cm", ".", "jet", ")", "draw_if_interactive", "(", ")" ]
https://github.com/numenta/nupic/blob/b9ebedaf54f49a33de22d8d44dff7c765cdb5548/external/linux32/lib/python2.6/site-packages/matplotlib/pyplot.py#L2599-L2609
thomasvs/morituri
135b2f7bf27721177e3aeb1d26403f1b29116599
morituri/extern/task/task.py
python
ITaskListener.progressed
(self, task, value)
Implement me to be informed about progress. @type value: float @param value: progress, from 0.0 to 1.0
Implement me to be informed about progress.
[ "Implement", "me", "to", "be", "informed", "about", "progress", "." ]
def progressed(self, task, value): """ Implement me to be informed about progress. @type value: float @param value: progress, from 0.0 to 1.0 """
[ "def", "progressed", "(", "self", ",", "task", ",", "value", ")", ":" ]
https://github.com/thomasvs/morituri/blob/135b2f7bf27721177e3aeb1d26403f1b29116599/morituri/extern/task/task.py#L247-L253
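Since this is an interface stub, an implementation only needs to provide the method; a sketch that renders progress as a percentage (the class name and the string passed for task are invented):

    class ConsoleProgressListener:
        def progressed(self, task, value):
            # value is a float in [0.0, 1.0] per the interface contract
            print("%s: %3d%%" % (task, int(value * 100)))

    listener = ConsoleProgressListener()
    listener.progressed("rip-track-01", 0.42)  # rip-track-01:  42%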
bnpy/bnpy
d5b311e8f58ccd98477f4a0c8a4d4982e3fca424
bnpy/allocmodel/hmm/HDPHMM.py
python
HDPHMM.get_init_prob_vector
(self)
return expELogPi0[0:self.K]
Get vector of initial probabilities for all K active states
Get vector of initial probabilities for all K active states
[ "Get", "vector", "of", "initial", "probabilities", "for", "all", "K", "active", "states" ]
def get_init_prob_vector(self): ''' Get vector of initial probabilities for all K active states ''' expELogPi0 = digamma( self.startTheta) - digamma(np.sum(self.startTheta)) np.exp(expELogPi0, out=expELogPi0) return expELogPi0[0:self.K]
[ "def", "get_init_prob_vector", "(", "self", ")", ":", "expELogPi0", "=", "digamma", "(", "self", ".", "startTheta", ")", "-", "digamma", "(", "np", ".", "sum", "(", "self", ".", "startTheta", ")", ")", "np", ".", "exp", "(", "expELogPi0", ",", "out", "=", "expELogPi0", ")", "return", "expELogPi0", "[", "0", ":", "self", ".", "K", "]" ]
https://github.com/bnpy/bnpy/blob/d5b311e8f58ccd98477f4a0c8a4d4982e3fca424/bnpy/allocmodel/hmm/HDPHMM.py#L81-L87
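The returned vector is exp(E[log pi_0,k]) under a Dirichlet posterior with parameters startTheta, truncated to the K active states; a numeric sketch of the same computation (values assumed):

    import numpy as np
    from scipy.special import digamma

    startTheta = np.array([3.0, 2.0, 1.0, 0.5])  # assumed K=3 active states + remainder mass
    K = 3

    # E[log pi_k] = digamma(theta_k) - digamma(sum_j theta_j) for a Dirichlet
    expELogPi0 = np.exp(digamma(startTheta) - digamma(startTheta.sum()))
    print(expELogPi0[:K])  # entries sum to less than 1 by Jensen's inequality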
itailang/SampleNet
442459abc54f9e14f0966a169a094a98febd32eb
classification/utils/plyfile.py
python
PlyProperty.name
(self)
return self._name
[]
def name(self): return self._name
[ "def", "name", "(", "self", ")", ":", "return", "self", ".", "_name" ]
https://github.com/itailang/SampleNet/blob/442459abc54f9e14f0966a169a094a98febd32eb/classification/utils/plyfile.py#L730-L731
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/decimal.py
python
Context.remainder_near
(self, a, b)
return a.remainder_near(b, context=self)
Returns to be "a - b * n", where n is the integer nearest the exact value of "x / b" (if two integers are equally near then the even one is chosen). If the result is equal to 0 then its sign will be the sign of a. This operation will fail under the same conditions as integer division (that is, if integer division on the same two operands would fail, the remainder cannot be calculated). >>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3')) Decimal('-0.9') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6')) Decimal('-2') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3')) Decimal('1') >>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3')) Decimal('-1') >>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1')) Decimal('0.2') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3')) Decimal('0.1') >>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3')) Decimal('-0.3') >>> ExtendedContext.remainder_near(3, 11) Decimal('3') >>> ExtendedContext.remainder_near(Decimal(3), 11) Decimal('3') >>> ExtendedContext.remainder_near(3, Decimal(11)) Decimal('3')
Returns to be "a - b * n", where n is the integer nearest the exact value of "x / b" (if two integers are equally near then the even one is chosen). If the result is equal to 0 then its sign will be the sign of a.
[ "Returns", "to", "be", "a", "-", "b", "*", "n", "where", "n", "is", "the", "integer", "nearest", "the", "exact", "value", "of", "x", "/", "b", "(", "if", "two", "integers", "are", "equally", "near", "then", "the", "even", "one", "is", "chosen", ")", ".", "If", "the", "result", "is", "equal", "to", "0", "then", "its", "sign", "will", "be", "the", "sign", "of", "a", "." ]
def remainder_near(self, a, b): """Returns to be "a - b * n", where n is the integer nearest the exact value of "x / b" (if two integers are equally near then the even one is chosen). If the result is equal to 0 then its sign will be the sign of a. This operation will fail under the same conditions as integer division (that is, if integer division on the same two operands would fail, the remainder cannot be calculated). >>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3')) Decimal('-0.9') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6')) Decimal('-2') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3')) Decimal('1') >>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3')) Decimal('-1') >>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1')) Decimal('0.2') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3')) Decimal('0.1') >>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3')) Decimal('-0.3') >>> ExtendedContext.remainder_near(3, 11) Decimal('3') >>> ExtendedContext.remainder_near(Decimal(3), 11) Decimal('3') >>> ExtendedContext.remainder_near(3, Decimal(11)) Decimal('3') """ a = _convert_other(a, raiseit=True) return a.remainder_near(b, context=self)
[ "def", "remainder_near", "(", "self", ",", "a", ",", "b", ")", ":", "a", "=", "_convert_other", "(", "a", ",", "raiseit", "=", "True", ")", "return", "a", ".", "remainder_near", "(", "b", ",", "context", "=", "self", ")" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/decimal.py#L5149-L5181
entropy1337/infernal-twin
10995cd03312e39a48ade0f114ebb0ae3a711bb8
Modules/build/pip/pip/utils/appdirs.py
python
site_config_dirs
(appname)
return pathlist
Return a list of potential user-shared config dirs for this application. "appname" is the name of application. Typical user config directories are: Mac OS X: /Library/Application Support/<AppName>/ Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in $XDG_CONFIG_DIRS Win XP: C:\Documents and Settings\All Users\Application ... ...Data\<AppName>\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: Hidden, but writeable on Win 7: C:\ProgramData\<AppName>\
Return a list of potential user-shared config dirs for this application.
[ "Return", "a", "list", "of", "potential", "user", "-", "shared", "config", "dirs", "for", "this", "application", "." ]
def site_config_dirs(appname): """Return a list of potential user-shared config dirs for this application. "appname" is the name of application. Typical user config directories are: Mac OS X: /Library/Application Support/<AppName>/ Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in $XDG_CONFIG_DIRS Win XP: C:\Documents and Settings\All Users\Application ... ...Data\<AppName>\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: Hidden, but writeable on Win 7: C:\ProgramData\<AppName>\ """ if WINDOWS: path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) pathlist = [os.path.join(path, appname)] elif sys.platform == 'darwin': pathlist = [os.path.join('/Library/Application Support', appname)] else: # try looking in $XDG_CONFIG_DIRS xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') if xdg_config_dirs: pathlist = [ os.sep.join([os.path.expanduser(x), appname]) for x in xdg_config_dirs.split(os.pathsep) ] else: pathlist = [] # always look in /etc directly as well pathlist.append('/etc') return pathlist
[ "def", "site_config_dirs", "(", "appname", ")", ":", "if", "WINDOWS", ":", "path", "=", "os", ".", "path", ".", "normpath", "(", "_get_win_folder", "(", "\"CSIDL_COMMON_APPDATA\"", ")", ")", "pathlist", "=", "[", "os", ".", "path", ".", "join", "(", "path", ",", "appname", ")", "]", "elif", "sys", ".", "platform", "==", "'darwin'", ":", "pathlist", "=", "[", "os", ".", "path", ".", "join", "(", "'/Library/Application Support'", ",", "appname", ")", "]", "else", ":", "# try looking in $XDG_CONFIG_DIRS", "xdg_config_dirs", "=", "os", ".", "getenv", "(", "'XDG_CONFIG_DIRS'", ",", "'/etc/xdg'", ")", "if", "xdg_config_dirs", ":", "pathlist", "=", "[", "os", ".", "sep", ".", "join", "(", "[", "os", ".", "path", ".", "expanduser", "(", "x", ")", ",", "appname", "]", ")", "for", "x", "in", "xdg_config_dirs", ".", "split", "(", "os", ".", "pathsep", ")", "]", "else", ":", "pathlist", "=", "[", "]", "# always look in /etc directly as well", "pathlist", ".", "append", "(", "'/etc'", ")", "return", "pathlist" ]
https://github.com/entropy1337/infernal-twin/blob/10995cd03312e39a48ade0f114ebb0ae3a711bb8/Modules/build/pip/pip/utils/appdirs.py#L165-L200
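Calling it is straightforward; the result depends on the platform and on $XDG_CONFIG_DIRS. A sketch assuming this vendored pip layout is importable as pip.utils.appdirs:

    from pip.utils.appdirs import site_config_dirs

    for d in site_config_dirs("pip"):
        print(d)
    # e.g. /etc/xdg/pip followed by /etc on Linux when XDG_CONFIG_DIRS is unset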
CedricGuillemet/Imogen
ee417b42747ed5b46cb11b02ef0c3630000085b3
bin/Lib/selectors.py
python
BaseSelector.register
(self, fileobj, events, data=None)
Register a file object. Parameters: fileobj -- file object or file descriptor events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) data -- attached data Returns: SelectorKey instance Raises: ValueError if events is invalid KeyError if fileobj is already registered OSError if fileobj is closed or otherwise is unacceptable to the underlying system call (if a system call is made) Note: OSError may or may not be raised
Register a file object.
[ "Register", "a", "file", "object", "." ]
def register(self, fileobj, events, data=None): """Register a file object. Parameters: fileobj -- file object or file descriptor events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) data -- attached data Returns: SelectorKey instance Raises: ValueError if events is invalid KeyError if fileobj is already registered OSError if fileobj is closed or otherwise is unacceptable to the underlying system call (if a system call is made) Note: OSError may or may not be raised """ raise NotImplementedError
[ "def", "register", "(", "self", ",", "fileobj", ",", "events", ",", "data", "=", "None", ")", ":", "raise", "NotImplementedError" ]
https://github.com/CedricGuillemet/Imogen/blob/ee417b42747ed5b46cb11b02ef0c3630000085b3/bin/Lib/selectors.py#L96-L116
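The concrete selectors supply the implementation; a minimal sketch registering a non-blocking listening socket for read events with DefaultSelector:

    import selectors
    import socket

    sel = selectors.DefaultSelector()
    sock = socket.socket()
    sock.bind(("localhost", 0))
    sock.listen()
    sock.setblocking(False)

    # data can carry anything; here a tag an event loop could dispatch on
    key = sel.register(sock, selectors.EVENT_READ, data="accept")
    print(key.fileobj is sock, key.events == selectors.EVENT_READ)  # True True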
trailofbits/manticore
b050fdf0939f6c63f503cdf87ec0ab159dd41159
manticore/core/smtlib/solver.py
python
PortfolioSolver._reset
(self, constraints: Optional[str] = None)
Auxiliary method to reset the smtlib external solver to initial defaults
Auxiliary method to reset the smtlib external solver to initial defaults
[ "Auxiliary", "method", "to", "reset", "the", "smtlib", "external", "solver", "to", "initial", "defaults" ]
def _reset(self, constraints: Optional[str] = None) -> None: """Auxiliary method to reset the smtlib external solver to initial defaults""" if self._support_reset: self._smtlib.start() # does not do anything if already started self._smtlib.send("(reset)") else: self._smtlib.stop() # does not do anything if already stopped self._smtlib.start() self._smtlib.init() if constraints is not None: self._smtlib.send(constraints)
[ "def", "_reset", "(", "self", ",", "constraints", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "None", ":", "if", "self", ".", "_support_reset", ":", "self", ".", "_smtlib", ".", "start", "(", ")", "# does not do anything if already started", "self", ".", "_smtlib", ".", "send", "(", "\"(reset)\"", ")", "else", ":", "self", ".", "_smtlib", ".", "stop", "(", ")", "# does not do anything if already stopped", "self", ".", "_smtlib", ".", "start", "(", ")", "self", ".", "_smtlib", ".", "init", "(", ")", "if", "constraints", "is", "not", "None", ":", "self", ".", "_smtlib", ".", "send", "(", "constraints", ")" ]
https://github.com/trailofbits/manticore/blob/b050fdf0939f6c63f503cdf87ec0ab159dd41159/manticore/core/smtlib/solver.py#L1066-L1078
wxWidgets/Phoenix
b2199e299a6ca6d866aa6f3d0888499136ead9d6
wx/lib/graphics.py
python
GraphicsBitmap.__init__
(self, width=-1, height=-1, format=FORMAT_ARGB32)
Create either a NULL GraphicsBitmap or an empty one if a size is given
Create either a NULL GraphicsBitmap or an empty one if a size is given
[ "Create", "either", "a", "NULL", "GraphicsBitmap", "or", "an", "empty", "one", "if", "a", "size", "is", "given" ]
def __init__(self, width=-1, height=-1, format=FORMAT_ARGB32): """Create either a NULL GraphicsBitmap or an empty one if a size is given""" self._surface = None if width > 0 and height > 0: self._surface = cairo.ImageSurface(format, width, height)
[ "def", "__init__", "(", "self", ",", "width", "=", "-", "1", ",", "height", "=", "-", "1", ",", "format", "=", "FORMAT_ARGB32", ")", ":", "self", ".", "_surface", "=", "None", "if", "width", ">", "0", "and", "height", ">", "0", ":", "self", ".", "_surface", "=", "cairo", ".", "ImageSurface", "(", "format", ",", "width", ",", "height", ")" ]
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/graphics.py#L611-L615
IJDykeman/wangTiles
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py
python
SimpleScrapingLocator.get_page
(self, url)
return result
Get the HTML for an URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator).
Get the HTML for an URL, possibly from an in-memory cache.
[ "Get", "the", "HTML", "for", "an", "URL", "possibly", "from", "an", "in", "-", "memory", "cache", "." ]
def get_page(self, url): """ Get the HTML for an URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator). """ # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api scheme, netloc, path, _, _, _ = urlparse(url) if scheme == 'file' and os.path.isdir(url2pathname(path)): url = urljoin(ensure_slash(url), 'index.html') if url in self._page_cache: result = self._page_cache[url] logger.debug('Returning %s from cache: %s', url, result) else: host = netloc.split(':', 1)[0] result = None if host in self._bad_hosts: logger.debug('Skipping %s due to bad host %s', url, host) else: req = Request(url, headers={'Accept-encoding': 'identity'}) try: logger.debug('Fetching %s', url) resp = self.opener.open(req, timeout=self.timeout) logger.debug('Fetched %s', url) headers = resp.info() content_type = headers.get('Content-Type', '') if HTML_CONTENT_TYPE.match(content_type): final_url = resp.geturl() data = resp.read() encoding = headers.get('Content-Encoding') if encoding: decoder = self.decoders[encoding] # fail if not found data = decoder(data) encoding = 'utf-8' m = CHARSET.search(content_type) if m: encoding = m.group(1) try: data = data.decode(encoding) except UnicodeError: # pragma: no cover data = data.decode('latin-1') # fallback result = Page(data, final_url) self._page_cache[final_url] = result except HTTPError as e: if e.code != 404: logger.exception('Fetch failed: %s: %s', url, e) except URLError as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) with self._lock: self._bad_hosts.add(host) except Exception as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) finally: self._page_cache[url] = result # even if None (failure) return result
[ "def", "get_page", "(", "self", ",", "url", ")", ":", "# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api", "scheme", ",", "netloc", ",", "path", ",", "_", ",", "_", ",", "_", "=", "urlparse", "(", "url", ")", "if", "scheme", "==", "'file'", "and", "os", ".", "path", ".", "isdir", "(", "url2pathname", "(", "path", ")", ")", ":", "url", "=", "urljoin", "(", "ensure_slash", "(", "url", ")", ",", "'index.html'", ")", "if", "url", "in", "self", ".", "_page_cache", ":", "result", "=", "self", ".", "_page_cache", "[", "url", "]", "logger", ".", "debug", "(", "'Returning %s from cache: %s'", ",", "url", ",", "result", ")", "else", ":", "host", "=", "netloc", ".", "split", "(", "':'", ",", "1", ")", "[", "0", "]", "result", "=", "None", "if", "host", "in", "self", ".", "_bad_hosts", ":", "logger", ".", "debug", "(", "'Skipping %s due to bad host %s'", ",", "url", ",", "host", ")", "else", ":", "req", "=", "Request", "(", "url", ",", "headers", "=", "{", "'Accept-encoding'", ":", "'identity'", "}", ")", "try", ":", "logger", ".", "debug", "(", "'Fetching %s'", ",", "url", ")", "resp", "=", "self", ".", "opener", ".", "open", "(", "req", ",", "timeout", "=", "self", ".", "timeout", ")", "logger", ".", "debug", "(", "'Fetched %s'", ",", "url", ")", "headers", "=", "resp", ".", "info", "(", ")", "content_type", "=", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", "if", "HTML_CONTENT_TYPE", ".", "match", "(", "content_type", ")", ":", "final_url", "=", "resp", ".", "geturl", "(", ")", "data", "=", "resp", ".", "read", "(", ")", "encoding", "=", "headers", ".", "get", "(", "'Content-Encoding'", ")", "if", "encoding", ":", "decoder", "=", "self", ".", "decoders", "[", "encoding", "]", "# fail if not found", "data", "=", "decoder", "(", "data", ")", "encoding", "=", "'utf-8'", "m", "=", "CHARSET", ".", "search", "(", "content_type", ")", "if", "m", ":", "encoding", "=", "m", ".", "group", "(", "1", ")", "try", ":", "data", "=", "data", ".", "decode", "(", "encoding", ")", "except", "UnicodeError", ":", "# pragma: no cover", "data", "=", "data", ".", "decode", "(", "'latin-1'", ")", "# fallback", "result", "=", "Page", "(", "data", ",", "final_url", ")", "self", ".", "_page_cache", "[", "final_url", "]", "=", "result", "except", "HTTPError", "as", "e", ":", "if", "e", ".", "code", "!=", "404", ":", "logger", ".", "exception", "(", "'Fetch failed: %s: %s'", ",", "url", ",", "e", ")", "except", "URLError", "as", "e", ":", "# pragma: no cover", "logger", ".", "exception", "(", "'Fetch failed: %s: %s'", ",", "url", ",", "e", ")", "with", "self", ".", "_lock", ":", "self", ".", "_bad_hosts", ".", "add", "(", "host", ")", "except", "Exception", "as", "e", ":", "# pragma: no cover", "logger", ".", "exception", "(", "'Fetch failed: %s: %s'", ",", "url", ",", "e", ")", "finally", ":", "self", ".", "_page_cache", "[", "url", "]", "=", "result", "# even if None (failure)", "return", "result" ]
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py#L752-L809
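A minimal usage sketch for the get_page entry above. It assumes distlib's SimpleScrapingLocator (the locator class that carries this method in distlib) is importable; the PyPI URLs are illustrative.

# Fetch one simple-index page; a repeated call is served from the cache.
from distlib.locators import SimpleScrapingLocator

locator = SimpleScrapingLocator('https://pypi.org/simple/', timeout=5.0)
page = locator.get_page('https://pypi.org/simple/requests/')
if page is not None:
    print(len(page.data))  # Page wraps the decoded HTML (Page(data, url) above)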
SheffieldML/GPy
bb1bc5088671f9316bc92a46d356734e34c2d5c0
GPy/util/mocap.py
python
vertex.__init__
(self, name, id, parents=[], children=[], meta = {})
[]
def __init__(self, name, id, parents=[], children=[], meta = {}):
    self.name = name
    self.id = id
    self.parents = parents
    self.children = children
    self.meta = meta
[ "def", "__init__", "(", "self", ",", "name", ",", "id", ",", "parents", "=", "[", "]", ",", "children", "=", "[", "]", ",", "meta", "=", "{", "}", ")", ":", "self", ".", "name", "=", "name", "self", ".", "id", "=", "id", "self", ".", "parents", "=", "parents", "self", ".", "children", "=", "children", "self", ".", "meta", "=", "meta" ]
https://github.com/SheffieldML/GPy/blob/bb1bc5088671f9316bc92a46d356734e34c2d5c0/GPy/util/mocap.py#L7-L12
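One detail worth flagging in the entry above: the parents=[], children=[], meta={} defaults are mutable, so they are shared across instances. A minimal stand-in class (hypothetical, mirroring the entry's signature) shows the pitfall:

# Why mutable default arguments are risky in this constructor.
class vertex:
    def __init__(self, name, id, parents=[], children=[], meta={}):
        self.name = name
        self.id = id
        self.parents = parents
        self.children = children
        self.meta = meta

a = vertex('root', 0)
b = vertex('child', 1)
a.children.append(b)
print(len(b.children))  # 1 -- both instances share the same default list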
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/twill/twill/commands.py
python
find
(what, flags='')
>> find <regexp> [<flags>] Succeed if the regular expression is on the page. Sets the local variable __match__ to the matching text. Flags is a string consisting of the following characters: * i: ignorecase * m: multiline * s: dotall For explanations of these, please see the Python re module documentation.
>> find <regexp> [<flags>] Succeed if the regular expression is on the page. Sets the local variable __match__ to the matching text.
[ ">>", "find", "<regexp", ">", "[", "<flags", ">", "]", "Succeed", "if", "the", "regular", "expression", "is", "on", "the", "page", ".", "Sets", "the", "local", "variable", "__match__", "to", "the", "matching", "text", "." ]
def find(what, flags=''):
    """
    >> find <regexp> [<flags>]

    Succeed if the regular expression is on the page.  Sets the local
    variable __match__ to the matching text.

    Flags is a string consisting of the following characters:

    * i: ignorecase
    * m: multiline
    * s: dotall

    For explanations of these, please see the Python re module documentation.
    """
    regexp = re.compile(what, _parseFindFlags(flags))
    page = browser.get_html()

    m = regexp.search(page)
    if not m:
        raise TwillAssertionError("no match to '%s'" % (what,))

    if m.groups():
        match_str = m.group(1)
    else:
        match_str = m.group(0)

    _, local_dict = get_twill_glocals()
    local_dict['__match__'] = match_str
[ "def", "find", "(", "what", ",", "flags", "=", "''", ")", ":", "regexp", "=", "re", ".", "compile", "(", "what", ",", "_parseFindFlags", "(", "flags", ")", ")", "page", "=", "browser", ".", "get_html", "(", ")", "m", "=", "regexp", ".", "search", "(", "page", ")", "if", "not", "m", ":", "raise", "TwillAssertionError", "(", "\"no match to '%s'\"", "%", "(", "what", ",", ")", ")", "if", "m", ".", "groups", "(", ")", ":", "match_str", "=", "m", ".", "group", "(", "1", ")", "else", ":", "match_str", "=", "m", ".", "group", "(", "0", ")", "_", ",", "local_dict", "=", "get_twill_glocals", "(", ")", "local_dict", "[", "'__match__'", "]", "=", "match_str" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twill/twill/commands.py#L218-L247
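A short usage sketch for find. It assumes the companion go() command from the same twill.commands module and that get_twill_glocals is importable from twill.namespaces; the URL and pattern are illustrative.

from twill.commands import go, find
from twill.namespaces import get_twill_glocals

go('http://example.com/')
find('Example (Domain)', 'i')        # case-insensitive; sets __match__
_, local_dict = get_twill_glocals()
print(local_dict['__match__'])       # 'Domain' -- group(1) wins when present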
1040003585/WebScrapingWithPython
a770fa5b03894076c8c9539b1ffff34424ffc016
portia_examle/lib/python2.7/site-packages/pip/_vendor/requests/models.py
python
Response.iter_lines
(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None)
Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. .. note:: This method is not reentrant safe.
Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses.
[ "Iterates", "over", "the", "response", "data", "one", "line", "at", "a", "time", ".", "When", "stream", "=", "True", "is", "set", "on", "the", "request", "this", "avoids", "reading", "the", "content", "at", "once", "into", "memory", "for", "large", "responses", "." ]
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
    """Iterates over the response data, one line at a time.  When
    stream=True is set on the request, this avoids reading the
    content at once into memory for large responses.

    .. note:: This method is not reentrant safe.
    """

    pending = None

    for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):

        if pending is not None:
            chunk = pending + chunk

        if delimiter:
            lines = chunk.split(delimiter)
        else:
            lines = chunk.splitlines()

        if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
            pending = lines.pop()
        else:
            pending = None

        for line in lines:
            yield line

    if pending is not None:
        yield pending
[ "def", "iter_lines", "(", "self", ",", "chunk_size", "=", "ITER_CHUNK_SIZE", ",", "decode_unicode", "=", "None", ",", "delimiter", "=", "None", ")", ":", "pending", "=", "None", "for", "chunk", "in", "self", ".", "iter_content", "(", "chunk_size", "=", "chunk_size", ",", "decode_unicode", "=", "decode_unicode", ")", ":", "if", "pending", "is", "not", "None", ":", "chunk", "=", "pending", "+", "chunk", "if", "delimiter", ":", "lines", "=", "chunk", ".", "split", "(", "delimiter", ")", "else", ":", "lines", "=", "chunk", ".", "splitlines", "(", ")", "if", "lines", "and", "lines", "[", "-", "1", "]", "and", "chunk", "and", "lines", "[", "-", "1", "]", "[", "-", "1", "]", "==", "chunk", "[", "-", "1", "]", ":", "pending", "=", "lines", ".", "pop", "(", ")", "else", ":", "pending", "=", "None", "for", "line", "in", "lines", ":", "yield", "line", "if", "pending", "is", "not", "None", ":", "yield", "pending" ]
https://github.com/1040003585/WebScrapingWithPython/blob/a770fa5b03894076c8c9539b1ffff34424ffc016/portia_examle/lib/python2.7/site-packages/pip/_vendor/requests/models.py#L710-L739
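A usage sketch with the public requests API; the endpoint is illustrative.

import requests

# stream=True defers reading the body until it is iterated.
resp = requests.get('https://httpbin.org/stream/3', stream=True)
for line in resp.iter_lines(chunk_size=512, decode_unicode=True):
    if line:              # filter out keep-alive empty lines
        print(line)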
codelv/enaml-native
04c3a015bcd649f374c5ecd98fcddba5e4fbdbdc
src/enamlnative/ios/app.py
python
IPhoneApplication.load_plugin_factories
(self)
Add any plugin toolkit widgets to the IOS_FACTORIES
Add any plugin toolkit widgets to the IOS_FACTORIES
[ "Add", "any", "plugin", "toolkit", "widgets", "to", "the", "ANDROID_FACTORIES" ]
def load_plugin_factories(self):
    """ Add any plugin toolkit widgets to the IOS_FACTORIES """
    for plugin in self.get_plugins(group='enaml_native_ios_factories'):
        get_factories = plugin.load()
        PLUGIN_FACTORIES = get_factories()
        factories.IOS_FACTORIES.update(PLUGIN_FACTORIES)
[ "def", "load_plugin_factories", "(", "self", ")", ":", "for", "plugin", "in", "self", ".", "get_plugins", "(", "group", "=", "'enaml_native_ios_factories'", ")", ":", "get_factories", "=", "plugin", ".", "load", "(", ")", "PLUGIN_FACTORIES", "=", "get_factories", "(", ")", "factories", ".", "IOS_FACTORIES", ".", "update", "(", "PLUGIN_FACTORIES", ")" ]
https://github.com/codelv/enaml-native/blob/04c3a015bcd649f374c5ecd98fcddba5e4fbdbdc/src/enamlnative/ios/app.py#L172-L177
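The get_plugins call above is presumably backed by setuptools entry points; a minimal sketch of the same discovery loop with pkg_resources directly, using the group name from the entry:

import pkg_resources

PLUGIN_FACTORIES = {}
for ep in pkg_resources.iter_entry_points(group='enaml_native_ios_factories'):
    get_factories = ep.load()                 # resolve the advertised callable
    PLUGIN_FACTORIES.update(get_factories())  # merge plugin widget factories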
playframework/play1
0ecac3bc2421ae2dbec27a368bf671eda1c9cba5
python/Lib/warnings.py
python
_show_warning
(message, category, filename, lineno, file=None, line=None)
Hook to write a warning to a file; replace if you like.
Hook to write a warning to a file; replace if you like.
[ "Hook", "to", "write", "a", "warning", "to", "a", "file", ";", "replace", "if", "you", "like", "." ]
def _show_warning(message, category, filename, lineno, file=None, line=None):
    """Hook to write a warning to a file; replace if you like."""
    if file is None:
        file = sys.stderr
        if file is None:
            # sys.stderr is None - warnings get lost
            return
    try:
        file.write(formatwarning(message, category, filename, lineno, line))
    except (IOError, UnicodeError):
        pass # the file (probably stderr) is invalid - this warning gets lost.
[ "def", "_show_warning", "(", "message", ",", "category", ",", "filename", ",", "lineno", ",", "file", "=", "None", ",", "line", "=", "None", ")", ":", "if", "file", "is", "None", ":", "file", "=", "sys", ".", "stderr", "if", "file", "is", "None", ":", "# sys.stderr is None - warnings get lost", "return", "try", ":", "file", ".", "write", "(", "formatwarning", "(", "message", ",", "category", ",", "filename", ",", "lineno", ",", "line", ")", ")", "except", "(", "IOError", ",", "UnicodeError", ")", ":", "pass", "# the file (probably stderr) is invalid - this warning gets lost." ]
https://github.com/playframework/play1/blob/0ecac3bc2421ae2dbec27a368bf671eda1c9cba5/python/Lib/warnings.py#L25-L35
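This hook is what the stdlib exposes as warnings.showwarning; replacing it redirects all warning output. A small self-contained example:

import sys
import warnings

def show_to_stdout(message, category, filename, lineno, file=None, line=None):
    # Route every warning to stdout instead of stderr.
    sys.stdout.write(warnings.formatwarning(message, category, filename, lineno, line))

warnings.showwarning = show_to_stdout
warnings.warn('demo warning')   # now printed on stdout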
fnl/segtok
ad7ff3b31b40787e8f3ca983db887684ab335fcf
count_continuations.py
python
abbreviation_pattern
(continuation)
return re.compile(pattern.format(continuation))
[]
def abbreviation_pattern(continuation): pattern = r'\.\s+{}\b' return re.compile(pattern.format(continuation))
[ "def", "abbreviation_pattern", "(", "continuation", ")", ":", "pattern", "=", "r'\\.\\s+{}\\b'", "return", "re", ".", "compile", "(", "pattern", ".", "format", "(", "continuation", ")", ")" ]
https://github.com/fnl/segtok/blob/ad7ff3b31b40787e8f3ca983db887684ab335fcf/count_continuations.py#L82-L84
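The compiled pattern matches a sentence-final period, whitespace, then the given continuation word at a word boundary. A self-contained check, with the function body repeated from the entry:

import re

def abbreviation_pattern(continuation):
    pattern = r'\.\s+{}\b'
    return re.compile(pattern.format(continuation))

p = abbreviation_pattern('However')
print(bool(p.search('It failed. However, we retried.')))    # True
print(bool(p.search('It failed. Therefore we stopped.')))   # False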
hsokooti/RegNet
28a8b6132677bb58e9fc811c0dd15d78913c7e86
functions/setting/setting_utils.py
python
dsmoothlist_by_deform_exp
(deform_exp, ag_mode)
return dsmoothlist_training, dsmoothlist_validation
Automatically extract the selected artificial generations for training and validation set: 'Resp': ['respiratory_motion', 'single_frequency', 'mixed_frequency', 'zero'], 'NoResp': ['single_frequency', 'mixed_frequency', 'zero'], 'SingleOnly': ['single_frequency'], 'MixedOnly': ['mixed_frequency'], 'SingleResp': ['single_frequency', 'respiratory_motion', 'zero'], please note that for validation set we do not need to select all of them :param deform_exp: :param ag_mode: artificial generation mode: 'Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization' :return:
Automatically extract the selected artificial generations for training and validation set: 'Resp': ['respiratory_motion', 'single_frequency', 'mixed_frequency', 'zero'], 'NoResp': ['single_frequency', 'mixed_frequency', 'zero'], 'SingleOnly': ['single_frequency'], 'MixedOnly': ['mixed_frequency'], 'SingleResp': ['single_frequency', 'respiratory_motion', 'zero'], please note that for validation set we do not need to select all of them :param deform_exp: :param ag_mode: artificial generation mode: 'Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization' :return:
[ "Automatically", "extract", "the", "selected", "artificial", "generations", "for", "training", "and", "validation", "set", ":", "Resp", ":", "[", "respiratory_motion", "single_frequency", "mixed_frequency", "zero", "]", "NoResp", ":", "[", "single_frequency", "mixed_frequency", "zero", "]", "SingleOnly", ":", "[", "single_frequency", "]", "MixedOnly", ":", "[", "mixed_frequency", "]", "SingleResp", ":", "[", "single_frequency", "respiratory_motion", "zero", "]", "please", "note", "that", "for", "validation", "set", "we", "do", "not", "need", "to", "select", "all", "of", "them", ":", "param", "deform_exp", ":", ":", "param", "ag_mode", ":", "artificial", "generation", "mode", ":", "Resp", "NoResp", "SingleOnly", "MixedOnly", "SingleResp", "Visualization", ":", "return", ":" ]
def dsmoothlist_by_deform_exp(deform_exp, ag_mode):
    """
    Automatically extract the selected artificial generations for training and validation set:
    'Resp': ['respiratory_motion', 'single_frequency', 'mixed_frequency', 'zero'],
    'NoResp': ['single_frequency', 'mixed_frequency', 'zero'],
    'SingleOnly': ['single_frequency'],
    'MixedOnly': ['mixed_frequency'],
    'SingleResp': ['single_frequency', 'respiratory_motion', 'zero'],
    please note that for validation set we do not need to select all of them
    :param deform_exp:
    :param ag_mode: artificial generation mode: 'Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization'
    :return:
    """
    if ag_mode not in ['Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization']:
        raise ValueError("exp_mode should be in ['Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization']")
    dsmoothlist_training = []
    dsmoothlist_validation = []
    deform_exp_setting = load_deform_exp_setting(deform_exp)
    all_deform_methods = deform_exp_setting['DeformMethods']
    comp_dict = {'Resp': ['respiratory_motion', 'single_frequency', 'mixed_frequency', 'zero'],
                 'NoResp': ['single_frequency', 'mixed_frequency', 'zero'],
                 'SingleOnly': ['single_frequency'],
                 'MixedOnly': ['mixed_frequency'],
                 'SingleResp': ['single_frequency', 'respiratory_motion', 'zero'],
                 'Visualization': []
                 }
    for i, deform_method in enumerate(all_deform_methods):
        if deform_method in comp_dict[ag_mode]:
            dsmoothlist_training.append(i)
    if deform_exp in ['3D_max7_D14_K', '3D_max15_D14_K', '3D_max20_D14_K', '3D_max15_SingleFrequency_Visualization']:
        if ag_mode == 'Resp':
            dsmoothlist_validation = [0, 5, 10]
        elif ag_mode == 'NoResp':
            dsmoothlist_validation = [5, 8, 10]
        elif ag_mode == 'SingleResp':
            dsmoothlist_validation = [4, 8, 10]
        elif ag_mode == 'SingleOnly':
            dsmoothlist_validation = [5, 6, 8]
        elif ag_mode == 'MixedOnly':
            dsmoothlist_validation = [9, 10, 12]
    else:
        raise ValueError('dsmoothlist_validation not found for deform_exp=' + deform_exp + ', please add it manually')
    return dsmoothlist_training, dsmoothlist_validation
[ "def", "dsmoothlist_by_deform_exp", "(", "deform_exp", ",", "ag_mode", ")", ":", "if", "ag_mode", "not", "in", "[", "'Resp'", ",", "'NoResp'", ",", "'SingleOnly'", ",", "'MixedOnly'", ",", "'SingleResp'", ",", "'Visualization'", "]", ":", "raise", "ValueError", "(", "\"exp_mode should be in ['Resp', 'NoResp', 'SingleOnly', 'MixedOnly', 'SingleResp', 'Visualization']\"", ")", "dsmoothlist_training", "=", "[", "]", "dsmoothlist_validation", "=", "[", "]", "deform_exp_setting", "=", "load_deform_exp_setting", "(", "deform_exp", ")", "all_deform_methods", "=", "deform_exp_setting", "[", "'DeformMethods'", "]", "comp_dict", "=", "{", "'Resp'", ":", "[", "'respiratory_motion'", ",", "'single_frequency'", ",", "'mixed_frequency'", ",", "'zero'", "]", ",", "'NoResp'", ":", "[", "'single_frequency'", ",", "'mixed_frequency'", ",", "'zero'", "]", ",", "'SingleOnly'", ":", "[", "'single_frequency'", "]", ",", "'MixedOnly'", ":", "[", "'mixed_frequency'", "]", ",", "'SingleResp'", ":", "[", "'single_frequency'", ",", "'respiratory_motion'", ",", "'zero'", "]", ",", "'Visualization'", ":", "[", "]", "}", "for", "i", ",", "deform_method", "in", "enumerate", "(", "all_deform_methods", ")", ":", "if", "deform_method", "in", "comp_dict", "[", "ag_mode", "]", ":", "dsmoothlist_training", ".", "append", "(", "i", ")", "if", "deform_exp", "in", "[", "'3D_max7_D14_K'", ",", "'3D_max15_D14_K'", ",", "'3D_max20_D14_K'", ",", "'3D_max15_SingleFrequency_Visualization'", "]", ":", "if", "ag_mode", "==", "'Resp'", ":", "dsmoothlist_validation", "=", "[", "0", ",", "5", ",", "10", "]", "elif", "ag_mode", "==", "'NoResp'", ":", "dsmoothlist_validation", "=", "[", "5", ",", "8", ",", "10", "]", "elif", "ag_mode", "==", "'SingleResp'", ":", "dsmoothlist_validation", "=", "[", "4", ",", "8", ",", "10", "]", "elif", "ag_mode", "==", "'SingleOnly'", ":", "dsmoothlist_validation", "=", "[", "5", ",", "6", ",", "8", "]", "elif", "ag_mode", "==", "'MixedOnly'", ":", "dsmoothlist_validation", "=", "[", "9", ",", "10", ",", "12", "]", "else", ":", "raise", "ValueError", "(", "'dsmoothlist_validation not found for deform_exp='", "+", "deform_exp", "+", "', please add it manually'", ")", "return", "dsmoothlist_training", ",", "dsmoothlist_validation" ]
https://github.com/hsokooti/RegNet/blob/28a8b6132677bb58e9fc811c0dd15d78913c7e86/functions/setting/setting_utils.py#L710-L754
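A call sketch for the entry above; it only runs inside the RegNet repo, since load_deform_exp_setting must be able to read the experiment settings. The expected values follow directly from the code:

train_ids, val_ids = dsmoothlist_by_deform_exp('3D_max7_D14_K', 'NoResp')
# train_ids: indices of single_frequency/mixed_frequency/zero methods
# val_ids == [5, 8, 10] for this deform_exp/ag_mode pair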
hakril/PythonForWindows
61e027a678d5b87aa64fcf8a37a6661a86236589
windows/winproxy/apis/kernel32.py
python
lstrcmpA
(lpString1, lpString2)
return lstrcmpA.ctypes_function(lpString1, lpString2)
[]
def lstrcmpA(lpString1, lpString2):
    return lstrcmpA.ctypes_function(lpString1, lpString2)
[ "def", "lstrcmpA", "(", "lpString1", ",", "lpString2", ")", ":", "return", "lstrcmpA", ".", "ctypes_function", "(", "lpString1", ",", "lpString2", ")" ]
https://github.com/hakril/PythonForWindows/blob/61e027a678d5b87aa64fcf8a37a6661a86236589/windows/winproxy/apis/kernel32.py#L395-L396
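The wrapper above forwards to kernel32. The same Win32 call through plain ctypes (Windows only; lstrcmpA returns <0, 0 or >0, like strcmp):

import ctypes

kernel32 = ctypes.windll.kernel32           # only exists on Windows
print(kernel32.lstrcmpA(b'abc', b'abc'))    # 0  -> equal
print(kernel32.lstrcmpA(b'abc', b'abd'))    # <0 -> first sorts earlier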
IronLanguages/main
a949455434b1fda8c783289e897e78a9a0caabb5
External.LCA_RESTRICTED/Languages/IronPython/27/Lib/distutils/ccompiler.py
python
CCompiler._fix_object_args
(self, objects, output_dir)
return (objects, output_dir)
Typecheck and fix up some arguments supplied to various methods. Specifically: ensure that 'objects' is a list; if output_dir is None, replace with self.output_dir. Return fixed versions of 'objects' and 'output_dir'.
Typecheck and fix up some arguments supplied to various methods. Specifically: ensure that 'objects' is a list; if output_dir is None, replace with self.output_dir. Return fixed versions of 'objects' and 'output_dir'.
[ "Typecheck", "and", "fix", "up", "some", "arguments", "supplied", "to", "various", "methods", ".", "Specifically", ":", "ensure", "that", "objects", "is", "a", "list", ";", "if", "output_dir", "is", "None", "replace", "with", "self", ".", "output_dir", ".", "Return", "fixed", "versions", "of", "objects", "and", "output_dir", "." ]
def _fix_object_args(self, objects, output_dir):
    """Typecheck and fix up some arguments supplied to various methods.
    Specifically: ensure that 'objects' is a list; if output_dir is
    None, replace with self.output_dir.  Return fixed versions of
    'objects' and 'output_dir'.
    """
    if not isinstance(objects, (list, tuple)):
        raise TypeError, "'objects' must be a list or tuple of strings"
    objects = list(objects)

    if output_dir is None:
        output_dir = self.output_dir
    elif not isinstance(output_dir, str):
        raise TypeError, "'output_dir' must be a string or None"

    return (objects, output_dir)
[ "def", "_fix_object_args", "(", "self", ",", "objects", ",", "output_dir", ")", ":", "if", "not", "isinstance", "(", "objects", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "TypeError", ",", "\"'objects' must be a list or tuple of strings\"", "objects", "=", "list", "(", "objects", ")", "if", "output_dir", "is", "None", ":", "output_dir", "=", "self", ".", "output_dir", "elif", "not", "isinstance", "(", "output_dir", ",", "str", ")", ":", "raise", "TypeError", ",", "\"'output_dir' must be a string or None\"", "return", "(", "objects", ",", "output_dir", ")" ]
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/27/Lib/distutils/ccompiler.py#L408-L424
skylander86/lambda-text-extractor
6da52d077a2fc571e38bfe29c33ae68f6443cd5a
lib-linux_x64/requests/utils.py
python
add_dict_to_cookiejar
(cj, cookie_dict)
return cookiejar_from_dict(cookie_dict, cj)
Returns a CookieJar from a key/value dictionary. :param cj: CookieJar to insert cookies into. :param cookie_dict: Dict of key/values to insert into CookieJar. :rtype: CookieJar
Returns a CookieJar from a key/value dictionary.
[ "Returns", "a", "CookieJar", "from", "a", "key", "/", "value", "dictionary", "." ]
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :rtype: CookieJar
    """

    return cookiejar_from_dict(cookie_dict, cj)
[ "def", "add_dict_to_cookiejar", "(", "cj", ",", "cookie_dict", ")", ":", "return", "cookiejar_from_dict", "(", "cookie_dict", ",", "cj", ")" ]
https://github.com/skylander86/lambda-text-extractor/blob/6da52d077a2fc571e38bfe29c33ae68f6443cd5a/lib-linux_x64/requests/utils.py#L379-L387
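A usage sketch with the public requests API (endpoint illustrative): seed a session's jar from a plain dict, then send it.

import requests
from requests.utils import add_dict_to_cookiejar

session = requests.Session()
add_dict_to_cookiejar(session.cookies, {'sessionid': 'abc123'})
resp = session.get('https://httpbin.org/cookies')   # echoes cookies back
print(resp.json())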
erevus-cn/pocscan
5fef32b1abe22a9f666ad3aacfd1f99d784cb72d
pocscan/plugins/pocsuite/packages/requests/packages/urllib3/packages/ordered_dict.py
python
OrderedDict.viewitems
(self)
return ItemsView(self)
od.viewitems() -> a set-like object providing a view on od's items
od.viewitems() -> a set-like object providing a view on od's items
[ "od", ".", "viewitems", "()", "-", ">", "a", "set", "-", "like", "object", "providing", "a", "view", "on", "od", "s", "items" ]
def viewitems(self):
    "od.viewitems() -> a set-like object providing a view on od's items"
    return ItemsView(self)
[ "def", "viewitems", "(", "self", ")", ":", "return", "ItemsView", "(", "self", ")" ]
https://github.com/erevus-cn/pocscan/blob/5fef32b1abe22a9f666ad3aacfd1f99d784cb72d/pocscan/plugins/pocsuite/packages/requests/packages/urllib3/packages/ordered_dict.py#L258-L260
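The backport above gives Python 2's OrderedDict the set-like item view that Python 3 returns natively from items(); the Python 3 equivalent of the behaviour it provides:

from collections import OrderedDict

a = OrderedDict([('x', 1), ('y', 2)])
b = OrderedDict([('y', 2), ('z', 3)])
print(a.items() & b.items())    # {('y', 2)} -- set intersection on views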
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Tools/webchecker/webchecker.py
python
Checker.openpage
(self, url_pair)
[]
def openpage(self, url_pair):
    url, fragment = url_pair
    try:
        return self.urlopener.open(url)
    except (OSError, IOError), msg:
        msg = self.sanitize(msg)
        self.note(0, "Error %s", msg)
        if self.verbose > 0:
            self.show(" HREF ", url, " from", self.todo[url_pair])
        self.setbad(url_pair, msg)
        return None
[ "def", "openpage", "(", "self", ",", "url_pair", ")", ":", "url", ",", "fragment", "=", "url_pair", "try", ":", "return", "self", ".", "urlopener", ".", "open", "(", "url", ")", "except", "(", "OSError", ",", "IOError", ")", ",", "msg", ":", "msg", "=", "self", ".", "sanitize", "(", "msg", ")", "self", ".", "note", "(", "0", ",", "\"Error %s\"", ",", "msg", ")", "if", "self", ".", "verbose", ">", "0", ":", "self", ".", "show", "(", "\" HREF \"", ",", "url", ",", "\" from\"", ",", "self", ".", "todo", "[", "url_pair", "]", ")", "self", ".", "setbad", "(", "url_pair", ",", "msg", ")", "return", "None" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Tools/webchecker/webchecker.py#L540-L550
blinktrade/bitex
a4896e7faef9c4aa0ca5325f18b77db67003764e
libs/ws4py/manager.py
python
WebSocketManager.stop
(self)
Mark the manager as terminated and release its resources.
Mark the manager as terminated and release its resources.
[ "Mark", "the", "manager", "as", "terminated", "and", "releases", "its", "resources", "." ]
def stop(self):
    """
    Mark the manager as terminated and
    release its resources.
    """
    self.running = False
    with self.lock:
        self.websockets.clear()
        self.poller.release()
[ "def", "stop", "(", "self", ")", ":", "self", ".", "running", "=", "False", "with", "self", ".", "lock", ":", "self", ".", "websockets", ".", "clear", "(", ")", "self", ".", "poller", ".", "release", "(", ")" ]
https://github.com/blinktrade/bitex/blob/a4896e7faef9c4aa0ca5325f18b77db67003764e/libs/ws4py/manager.py#L216-L224
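A lifecycle sketch around stop(), assuming ws4py's documented WebSocketManager API (start() spawns the polling thread, add() registers sockets):

from ws4py.manager import WebSocketManager

manager = WebSocketManager()
manager.start()            # spawn the polling thread
try:
    pass                   # manager.add(websocket) as connections arrive
finally:
    manager.stop()         # clears tracked sockets, releases the poller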
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
lib/cloudinary/cache/storage/key_value_storage.py
python
KeyValueStorage.clear
(self)
Clears all entries :return: bool True on success or False on failure
Clears all entries
[ "Clears", "all", "entries" ]
def clear(self):
    """
    Clears all entries

    :return: bool True on success or False on failure
    """
    raise NotImplementedError
[ "def", "clear", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/cloudinary/cache/storage/key_value_storage.py#L45-L51
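A concrete-subclass sketch; clear() is the only method shown in this entry, and get/set/delete are assumed sibling abstract methods of cloudinary's KeyValueStorage.

from cloudinary.cache.storage.key_value_storage import KeyValueStorage

class DictStorage(KeyValueStorage):
    """In-memory storage backed by a plain dict."""
    def __init__(self):
        self._data = {}

    def get(self, key):               # assumed abstract sibling
        return self._data.get(key)

    def set(self, key, value):        # assumed abstract sibling
        self._data[key] = value
        return True

    def delete(self, key):            # assumed abstract sibling
        return self._data.pop(key, None) is not None

    def clear(self):
        self._data.clear()
        return True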
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
ansible/roles/lib_openshift_3.2/library/oadm_policy_user.py
python
OpenShiftCLI._process
(self, template_name, create=False, params=None, template_data=None)
return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
process a template
process a template
[ "return", "all", "pods" ]
def _process(self, template_name, create=False, params=None, template_data=None):
    '''process a template '''
    cmd = ['process', '-n', self.namespace]
    if template_data:
        cmd.extend(['-f', '-'])
    else:
        cmd.append(template_name)
    if params:
        param_str = ["%s=%s" % (key, value) for key, value in params.items()]
        cmd.append('-v')
        cmd.extend(param_str)

    results = self.openshift_cmd(cmd, output=True, input_data=template_data)

    if results['returncode'] != 0 or not create:
        return results

    fname = '/tmp/%s' % template_name
    yed = Yedit(fname, results['results'])
    yed.write()

    atexit.register(Utils.cleanup, [fname])

    return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
[ "def", "_process", "(", "self", ",", "template_name", ",", "create", "=", "False", ",", "params", "=", "None", ",", "template_data", "=", "None", ")", ":", "cmd", "=", "[", "'process'", ",", "'-n'", ",", "self", ".", "namespace", "]", "if", "template_data", ":", "cmd", ".", "extend", "(", "[", "'-f'", ",", "'-'", "]", ")", "else", ":", "cmd", ".", "append", "(", "template_name", ")", "if", "params", ":", "param_str", "=", "[", "\"%s=%s\"", "%", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", "]", "cmd", ".", "append", "(", "'-v'", ")", "cmd", ".", "extend", "(", "param_str", ")", "results", "=", "self", ".", "openshift_cmd", "(", "cmd", ",", "output", "=", "True", ",", "input_data", "=", "template_data", ")", "if", "results", "[", "'returncode'", "]", "!=", "0", "or", "not", "create", ":", "return", "results", "fname", "=", "'/tmp/%s'", "%", "template_name", "yed", "=", "Yedit", "(", "fname", ",", "results", "[", "'results'", "]", ")", "yed", ".", "write", "(", ")", "atexit", ".", "register", "(", "Utils", ".", "cleanup", ",", "[", "fname", "]", ")", "return", "self", ".", "openshift_cmd", "(", "[", "'-n'", ",", "self", ".", "namespace", ",", "'create'", ",", "'-f'", ",", "fname", "]", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_openshift_3.2/library/oadm_policy_user.py#L100-L123
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/apigateway/v20180808/models.py
python
UpdateApiAppKeyRequest.__init__
(self)
r""" :param ApiAppId: 应用唯一 ID。 :type ApiAppId: str :param ApiAppKey: 应用的Key。 :type ApiAppKey: str :param ApiAppSecret: 应用的Secret。 :type ApiAppSecret: str
r""" :param ApiAppId: 应用唯一 ID。 :type ApiAppId: str :param ApiAppKey: 应用的Key。 :type ApiAppKey: str :param ApiAppSecret: 应用的Secret。 :type ApiAppSecret: str
[ "r", ":", "param", "ApiAppId", ":", "应用唯一", "ID。", ":", "type", "ApiAppId", ":", "str", ":", "param", "ApiAppKey", ":", "应用的Key。", ":", "type", "ApiAppKey", ":", "str", ":", "param", "ApiAppSecret", ":", "应用的Secret。", ":", "type", "ApiAppSecret", ":", "str" ]
def __init__(self):
    r"""
    :param ApiAppId: Unique application ID.
    :type ApiAppId: str
    :param ApiAppKey: Key of the application.
    :type ApiAppKey: str
    :param ApiAppSecret: Secret of the application.
    :type ApiAppSecret: str
    """
    self.ApiAppId = None
    self.ApiAppKey = None
    self.ApiAppSecret = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "ApiAppId", "=", "None", "self", ".", "ApiAppKey", "=", "None", "self", ".", "ApiAppSecret", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/apigateway/v20180808/models.py#L9094-L9105
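A request-building sketch following the SDK's usual model pattern; the IDs are placeholders and the commented client call assumes a configured API Gateway client.

from tencentcloud.apigateway.v20180808 import models

req = models.UpdateApiAppKeyRequest()
req.ApiAppId = 'app-xxxxxxxx'         # unique application ID
req.ApiAppKey = 'APID-xxxxxxxx'       # application Key
req.ApiAppSecret = 'xxxxxxxx'         # application Secret
# resp = client.UpdateApiAppKey(req)  # with a configured client instance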
plizonczyk/noiseprotocol
73375448c55af85df0230841af868b7f31942f0a
noise/state.py
python
SymmetricState.encrypt_and_hash
(self, plaintext: bytes)
return ciphertext
Sets ciphertext = EncryptWithAd(h, plaintext), calls MixHash(ciphertext), and returns ciphertext. Note that if k is empty, the EncryptWithAd() call will set ciphertext equal to plaintext. :param plaintext: bytes sequence :return: ciphertext bytes sequence
Sets ciphertext = EncryptWithAd(h, plaintext), calls MixHash(ciphertext), and returns ciphertext. Note that if k is empty, the EncryptWithAd() call will set ciphertext equal to plaintext.
[ "Sets", "ciphertext", "=", "EncryptWithAd", "(", "h", "plaintext", ")", "calls", "MixHash", "(", "ciphertext", ")", "and", "returns", "ciphertext", ".", "Note", "that", "if", "k", "is", "empty", "the", "EncryptWithAd", "()", "call", "will", "set", "ciphertext", "equal", "to", "plaintext", "." ]
def encrypt_and_hash(self, plaintext: bytes) -> bytes:
    """
    Sets ciphertext = EncryptWithAd(h, plaintext), calls MixHash(ciphertext), and returns ciphertext. Note that if
    k is empty, the EncryptWithAd() call will set ciphertext equal to plaintext.

    :param plaintext: bytes sequence
    :return: ciphertext bytes sequence
    """
    ciphertext = self.cipher_state.encrypt_with_ad(self.h, plaintext)
    self.mix_hash(ciphertext)
    return ciphertext
[ "def", "encrypt_and_hash", "(", "self", ",", "plaintext", ":", "bytes", ")", "->", "bytes", ":", "ciphertext", "=", "self", ".", "cipher_state", ".", "encrypt_with_ad", "(", "self", ".", "h", ",", "plaintext", ")", "self", ".", "mix_hash", "(", "ciphertext", ")", "return", "ciphertext" ]
https://github.com/plizonczyk/noiseprotocol/blob/73375448c55af85df0230841af868b7f31942f0a/noise/state.py#L164-L174
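For context, the MixHash step this method calls is defined by the Noise spec as h = HASH(h || data). A standalone sketch with SHA-256 standing in for the negotiated hash function:

import hashlib

def mix_hash(h: bytes, data: bytes) -> bytes:
    # Absorb `data` into the running handshake hash.
    return hashlib.sha256(h + data).digest()

h = bytes(32)                  # placeholder initial handshake hash
ciphertext = b'...'            # would come from encrypt_with_ad()
h = mix_hash(h, ciphertext)    # transcript now binds the ciphertext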
aiidateam/aiida-core
c743a335480f8bb3a5e4ebd2463a31f9f3b9f9b2
aiida/backends/sqlalchemy/migrations/versions/375c2db70663_dblog_uuid_uniqueness_constraint.py
python
upgrade
()
Add unique key constraint to the UUID column.
Add unique key constraint to the UUID column.
[ "Add", "unique", "key", "constraint", "to", "the", "UUID", "column", "." ]
def upgrade():
    """Add unique key constraint to the UUID column."""
    op.create_unique_constraint('db_dblog_uuid_key', 'db_dblog', ['uuid'])
[ "def", "upgrade", "(", ")", ":", "op", ".", "create_unique_constraint", "(", "'db_dblog_uuid_key'", ",", "'db_dblog'", ",", "[", "'uuid'", "]", ")" ]
https://github.com/aiidateam/aiida-core/blob/c743a335480f8bb3a5e4ebd2463a31f9f3b9f9b2/aiida/backends/sqlalchemy/migrations/versions/375c2db70663_dblog_uuid_uniqueness_constraint.py#L29-L31
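The matching downgrade for this migration, using Alembic's standard op API (constraint and table names from the entry above):

from alembic import op

def downgrade():
    """Drop the unique key constraint from the UUID column."""
    op.drop_constraint('db_dblog_uuid_key', 'db_dblog', type_='unique')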
b1naryth1ef/disco
7a2e86a2a3dc58e207d2c6e9a686a3296d652864
disco/types/message.py
python
Message.guild
(self)
return self.channel.guild
Returns ------- `Guild` The guild (if applicable) this message was created in.
Returns ------- `Guild` The guild (if applicable) this message was created in.
[ "Returns", "-------", "Guild", "The", "guild", "(", "if", "applicable", ")", "this", "message", "was", "created", "in", "." ]
def guild(self):
    """
    Returns
    -------
    `Guild`
        The guild (if applicable) this message was created in.
    """
    return self.channel.guild
[ "def", "guild", "(", "self", ")", ":", "return", "self", ".", "channel", ".", "guild" ]
https://github.com/b1naryth1ef/disco/blob/7a2e86a2a3dc58e207d2c6e9a686a3296d652864/disco/types/message.py#L376-L383
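A sketch of reading the property from an event handler, assuming disco's Plugin.listen decorator and the MessageCreate event shape:

from disco.bot import Plugin

class GuildEcho(Plugin):
    @Plugin.listen('MessageCreate')
    def on_message_create(self, event):
        guild = event.message.guild        # None for direct messages
        if guild is not None:
            print('message in guild %s' % guild.id)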
openstack/heat
ea6633c35b04bb49c4a2858edc9df0a82d039478
heat/engine/resource.py
python
Resource.handle_delete
(self)
return None
Default implementation; should be overridden by resources.
Default implementation; should be overridden by resources.
[ "Default", "implementation", ";", "should", "be", "overridden", "by", "resources", "." ]
def handle_delete(self):
    """Default implementation; should be overridden by resources."""
    if self.entity and self.resource_id is not None:
        with self._default_client_plugin().ignore_not_found:
            obj = getattr(self.client(), self.entity)
            obj.delete(self.resource_id)
            return self.resource_id
    return None
[ "def", "handle_delete", "(", "self", ")", ":", "if", "self", ".", "entity", "and", "self", ".", "resource_id", "is", "not", "None", ":", "with", "self", ".", "_default_client_plugin", "(", ")", ".", "ignore_not_found", ":", "obj", "=", "getattr", "(", "self", ".", "client", "(", ")", ",", "self", ".", "entity", ")", "obj", ".", "delete", "(", "self", ".", "resource_id", ")", "return", "self", ".", "resource_id", "return", "None" ]
https://github.com/openstack/heat/blob/ea6633c35b04bb49c4a2858edc9df0a82d039478/heat/engine/resource.py#L1965-L1972
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/six.py
python
MovedAttribute.__init__
(self, name, old_mod, new_mod, old_attr=None, new_attr=None)
[]
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
    super(MovedAttribute, self).__init__(name)
    if PY3:
        if new_mod is None:
            new_mod = name
        self.mod = new_mod
        if new_attr is None:
            if old_attr is None:
                new_attr = name
            else:
                new_attr = old_attr
        self.attr = new_attr
    else:
        self.mod = old_mod
        if old_attr is None:
            old_attr = name
        self.attr = old_attr
[ "def", "__init__", "(", "self", ",", "name", ",", "old_mod", ",", "new_mod", ",", "old_attr", "=", "None", ",", "new_attr", "=", "None", ")", ":", "super", "(", "MovedAttribute", ",", "self", ")", ".", "__init__", "(", "name", ")", "if", "PY3", ":", "if", "new_mod", "is", "None", ":", "new_mod", "=", "name", "self", ".", "mod", "=", "new_mod", "if", "new_attr", "is", "None", ":", "if", "old_attr", "is", "None", ":", "new_attr", "=", "name", "else", ":", "new_attr", "=", "old_attr", "self", ".", "attr", "=", "new_attr", "else", ":", "self", ".", "mod", "=", "old_mod", "if", "old_attr", "is", "None", ":", "old_attr", "=", "name", "self", ".", "attr", "=", "old_attr" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/six.py#L141-L157
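The user-facing effect of these MovedAttribute records is the six.moves namespace, which resolves each name to the right module per Python version:

from six.moves import range                  # xrange on Py2, range on Py3
from six.moves.urllib.parse import urlparse  # module relocated in Py3

print(urlparse('https://example.com/path').netloc)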