Columns (name: type and value range):
repo: stringlengths (7 to 54)
path: stringlengths (4 to 192)
url: stringlengths (87 to 284)
code: stringlengths (78 to 104k)
code_tokens: list
docstring: stringlengths (1 to 46.9k)
docstring_tokens: list
language: stringclasses (1 value)
partition: stringclasses (3 values)
xapple/plumbing
plumbing/databases/sqlite_database.py
https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/databases/sqlite_database.py#L174-L185
def add_table(self, name, columns, type_map=None, if_not_exists=False): """Add add a new table to the database. For instance you could do this: self.add_table('data', {'id':'integer', 'source':'text', 'pubmed':'integer'})""" # Check types mapping # if type_map is None and isinstance(columns, dict): types = columns if type_map is None: types = {} # Safe or unsafe # if if_not_exists: query = 'CREATE TABLE IF NOT EXISTS "%s" (%s);' else: query = 'CREATE table "%s" (%s);' # Do it # cols = ','.join(['"' + c + '"' + ' ' + types.get(c, 'text') for c in columns]) self.own_cursor.execute(query % (self.main_table, cols))
[ "def", "add_table", "(", "self", ",", "name", ",", "columns", ",", "type_map", "=", "None", ",", "if_not_exists", "=", "False", ")", ":", "# Check types mapping #", "if", "type_map", "is", "None", "and", "isinstance", "(", "columns", ",", "dict", ")", ":", "types", "=", "columns", "if", "type_map", "is", "None", ":", "types", "=", "{", "}", "# Safe or unsafe #", "if", "if_not_exists", ":", "query", "=", "'CREATE TABLE IF NOT EXISTS \"%s\" (%s);'", "else", ":", "query", "=", "'CREATE table \"%s\" (%s);'", "# Do it #", "cols", "=", "','", ".", "join", "(", "[", "'\"'", "+", "c", "+", "'\"'", "+", "' '", "+", "types", ".", "get", "(", "c", ",", "'text'", ")", "for", "c", "in", "columns", "]", ")", "self", ".", "own_cursor", ".", "execute", "(", "query", "%", "(", "self", ".", "main_table", ",", "cols", ")", ")" ]
Add add a new table to the database. For instance you could do this: self.add_table('data', {'id':'integer', 'source':'text', 'pubmed':'integer'})
[ "Add", "add", "a", "new", "table", "to", "the", "database", ".", "For", "instance", "you", "could", "do", "this", ":", "self", ".", "add_table", "(", "data", "{", "id", ":", "integer", "source", ":", "text", "pubmed", ":", "integer", "}", ")" ]
python
train
bwesterb/tkbd
src/ruuster.py
https://github.com/bwesterb/tkbd/blob/fcf16977d38a93fe9b7fa198513007ab9921b650/src/ruuster.py#L43-L55
def fetch_room_ids(self, names): """ Fetches the ids of the rooms with the given names """ ret = {} names_set = set(names) try: for d in msgpack.unpack(urllib2.urlopen( "%s/list/locations?format=msgpack" % self.url)): name = d['name'].upper() # normalize: Hg -> HG if name in names_set: ret[name] = d['id'] except urllib2.HTTPError, e: raise RuusterError(e) return ret
[ "def", "fetch_room_ids", "(", "self", ",", "names", ")", ":", "ret", "=", "{", "}", "names_set", "=", "set", "(", "names", ")", "try", ":", "for", "d", "in", "msgpack", ".", "unpack", "(", "urllib2", ".", "urlopen", "(", "\"%s/list/locations?format=msgpack\"", "%", "self", ".", "url", ")", ")", ":", "name", "=", "d", "[", "'name'", "]", ".", "upper", "(", ")", "# normalize: Hg -> HG", "if", "name", "in", "names_set", ":", "ret", "[", "name", "]", "=", "d", "[", "'id'", "]", "except", "urllib2", ".", "HTTPError", ",", "e", ":", "raise", "RuusterError", "(", "e", ")", "return", "ret" ]
Fetches the ids of the rooms with the given names
[ "Fetches", "the", "ids", "of", "the", "rooms", "with", "the", "given", "names" ]
python
train
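The fetch_room_ids snippet above is Python 2 only (urllib2 and the "except HTTPError, e" syntax). As a hedged illustration, a roughly equivalent Python 3 lookup against the same kind of Ruuster endpoint could look like the sketch below; the msgpack package and a reachable server are assumptions, not part of the original row.

import msgpack
from urllib.request import urlopen

def fetch_room_ids_py3(base_url, names):
    # Same idea as the row above, rewritten for Python 3 (illustrative only).
    names_set = set(names)
    ret = {}
    with urlopen("%s/list/locations?format=msgpack" % base_url) as resp:
        for d in msgpack.unpackb(resp.read(), raw=False):
            name = d['name'].upper()  # normalize: Hg -> HG
            if name in names_set:
                ret[name] = d['id']
    return ret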
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L3572-L3577
def getDefaultApplicationForMimeType(self, pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen): """return the app key that will open this mime type""" fn = self.function_table.getDefaultApplicationForMimeType result = fn(pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen) return result
[ "def", "getDefaultApplicationForMimeType", "(", "self", ",", "pchMimeType", ",", "pchAppKeyBuffer", ",", "unAppKeyBufferLen", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getDefaultApplicationForMimeType", "result", "=", "fn", "(", "pchMimeType", ",", "pchAppKeyBuffer", ",", "unAppKeyBufferLen", ")", "return", "result" ]
return the app key that will open this mime type
[ "return", "the", "app", "key", "that", "will", "open", "this", "mime", "type" ]
python
train
deepmipt/DeepPavlov
deeppavlov/utils/alexa/server.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/server.py#L84-L274
def run_alexa_server(agent_generator: callable, multi_instance: bool = False, stateful: bool = False, port: Optional[int] = None, https: bool = False, ssl_key: str = None, ssl_cert: str = None) -> None: """Initiates Flask web service with Alexa skill. Args: agent_generator: Callback Alexa agents factory. multi_instance: Multi instance mode flag. stateful: Stateful mode flag. port: Flask web service port. https: Flag for running Alexa skill service in https mode. ssl_key: SSL key file path. ssl_cert: SSL certificate file path. """ server_config_path = Path(get_settings_path(), SERVER_CONFIG_FILENAME).resolve() server_params = read_json(server_config_path) host = server_params['common_defaults']['host'] port = port or server_params['common_defaults']['port'] alexa_server_params = server_params['alexa_defaults'] alexa_server_params['multi_instance'] = multi_instance or server_params['common_defaults']['multi_instance'] alexa_server_params['stateful'] = stateful or server_params['common_defaults']['stateful'] alexa_server_params['amazon_cert_lifetime'] = AMAZON_CERTIFICATE_LIFETIME if https: ssh_key_path = Path(ssl_key or server_params['https_key_path']).resolve() if not ssh_key_path.is_file(): e = FileNotFoundError('Ssh key file not found: please provide correct path in --key param or ' 'https_key_path param in server configuration file') log.error(e) raise e ssh_cert_path = Path(ssl_cert or server_params['https_cert_path']).resolve() if not ssh_cert_path.is_file(): e = FileNotFoundError('Ssh certificate file not found: please provide correct path in --cert param or ' 'https_cert_path param in server configuration file') log.error(e) raise e ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) ssl_context.load_cert_chain(ssh_cert_path, ssh_key_path) else: ssl_context = None input_q = Queue() output_q = Queue() bot = Bot(agent_generator, alexa_server_params, input_q, output_q) bot.start() endpoint_description = { 'description': 'Amazon Alexa custom service endpoint', 'parameters': [ { 'name': 'Signature', 'in': 'header', 'required': 'true', 'type': 'string', 'example': 'Z5H5wqd06ExFVPNfJiqhKvAFjkf+cTVodOUirucHGcEVAMO1LfvgqWUkZ/X1ITDZbI0w+SMwVkEQZlkeThbVS/54M22StNDUtfz4Ua20xNDpIPwcWIACAmZ38XxbbTEFJI5WwqrbilNcfzqiGrIPfdO5rl+/xUjHFUdcJdUY/QzBxXsceytVYfEiR9MzOCN2m4C0XnpThUavAu159KrLj8AkuzN0JF87iXv+zOEeZRgEuwmsAnJrRUwkJ4yWokEPnSVdjF0D6f6CscfyvRe9nsWShq7/zRTa41meweh+n006zvf58MbzRdXPB22RI4AN0ksWW7hSC8/QLAKQE+lvaw==', }, { 'name': 'Signaturecertchainurl', 'in': 'header', 'required': 'true', 'type': 'string', 'example': 'https://s3.amazonaws.com/echo.api/echo-api-cert-6-ats.pem', }, { 'name': 'data', 'in': 'body', 'required': 'true', 'example': { 'version': '1.0', 'session': { 'new': False, 'sessionId': 'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63', 'application': { 'applicationId': 'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6' }, 'attributes': { 'sessionId': 'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63' }, 'user': { 'userId': 'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ' } }, 'context': { 'System': { 'application': { 'applicationId': 'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6' }, 'user': { 'userId': 
'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ' }, 'device': { 'deviceId': 'amzn1.ask.device.AFQAMLYOYQUUACSE7HFVYS4ZI2KUB35JPHQRUPKTDCAU3A47WESP5L57KSWT5L6RT3FVXWH4OA2DNPJRMZ2VGEIACF3PJEIDCOUWUBC4W5RPJNUB3ZVT22J4UJN5UL3T2UBP36RVHFJ5P4IPT2HUY3P2YOY33IOU4O33HUAG7R2BUNROEH4T2', 'supportedInterfaces': {} }, 'apiEndpoint': 'https://api.amazonalexa.com', 'apiAccessToken': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjhiMTdhNWRlLTM3NDktNDkxOS1hYTFmLWUwYmJhZjhhNDZhNiIsImV4cCI6MTU0NTIyMzY1OCwiaWF0IjoxNTQ1MjIwMDU4LCJuYmYiOjE1NDUyMjAwNTgsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUZRQU1MWU9ZUVVVQUNTRTdIRlZZUzRaSTJLVUIzNUpQSFFSVVBLVERDQVUzQTQ3V0VTUDVMNTdLU1dUNUw2UlQzRlZYV0g0T0EyRE5QSlJNWjJWR0VJQUNGM1BKRUlEQ09VV1VCQzRXNVJQSk5VQjNaVlQyMko0VUpONVVMM1QyVUJQMzZSVkhGSjVQNElQVDJIVVkzUDJZT1kzM0lPVTRPMzNIVUFHN1IyQlVOUk9FSDRUMiIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFHUjRSMkxPVkhNTk1OT0dST0JWTkxVN0NMNEM1N1g0NjVYSkYyVDJGNTVPVVhOVExDWERRUDNJNTVVWFpJQUxFS0taSjZRMk1BNU1FRlNNWlZQRUw1TlZaUzZGWkxFVTQ0NEJWT0xQQjVXVkg1Q0hZVFFBS0dEN1ZGTEdQUkZaVkhISDJOSUI0SEtOSEhHWDZITTZTNlFEV0NLWFdPSVpMN09OTlFTQlVDVlBNWlFLTUNZWFJHNUJBMlBPWUVYRkRYUlhDR0VWRFdWU01QUSJ9fQ.jcomYhBhU485T4uoe2NyhWnL-kZHoPQKpcycFqa-1sy_lSIitfFGup9DKrf2NkN-I9lZ3xwq9llqx9WRN78fVJjN6GLcDhBDH0irPwt3n9_V7_5bfB6KARv5ZG-JKOmZlLBqQbnln0DAJ10D8HNiytMARNEwduMBVDNK0A5z6YxtRcLYYFD2-Ieg_V8Qx90eE2pd2U5xOuIEL0pXfSoiJ8vpxb8BKwaMO47tdE4qhg_k7v8ClwyXg3EMEhZFjixYNqdW1tCrwDGj58IWMXDyzZhIlRMh6uudMOT6scSzcNVD0v42IOTZ3S_X6rG01B7xhUDlZXMqkrCuzOyqctGaPw' }, 'Viewport': { 'experiences': [ { 'arcMinuteWidth': 246, 'arcMinuteHeight': 144, 'canRotate': False, 'canResize': False } ], 'shape': 'RECTANGLE', 'pixelWidth': 1024, 'pixelHeight': 600, 'dpi': 160, 'currentPixelWidth': 1024, 'currentPixelHeight': 600, 'touch': [ 'SINGLE' ] } }, 'request': { 'type': 'IntentRequest', 'requestId': 'amzn1.echo-api.request.388d0f6e-04b9-4450-a687-b9abaa73ac6a', 'timestamp': '2018-12-19T11:47:38Z', 'locale': 'en-US', 'intent': { 'name': 'AskDeepPavlov', 'confirmationStatus': 'NONE', 'slots': { 'raw_input': { 'name': 'raw_input', 'value': 'my beautiful sandbox skill', 'resolutions': { 'resolutionsPerAuthority': [ { 'authority': 'amzn1.er-authority.echo-sdk.amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6.GetInput', 'status': { 'code': 'ER_SUCCESS_NO_MATCH' } } ] }, 'confirmationStatus': 'NONE', 'source': 'USER' } } } } } } ], 'responses': { "200": { "description": "A model response" } } } @app.route('/') def index(): return redirect('/apidocs/') @app.route('/interact', methods=['POST']) @swag_from(endpoint_description) def handle_request(): request_body: bytes = request.get_data() signature_chain_url: str = request.headers.get('Signaturecertchainurl') signature: str = request.headers.get('Signature') alexa_request: dict = request.get_json() request_dict = { 'request_body': request_body, 'signature_chain_url': signature_chain_url, 'signature': signature, 'alexa_request': alexa_request } bot.input_queue.put(request_dict) response: dict = bot.output_queue.get() response_code = 400 if 'error' in response.keys() else 200 return jsonify(response), response_code app.run(host=host, port=port, threaded=True, ssl_context=ssl_context)
[ "def", "run_alexa_server", "(", "agent_generator", ":", "callable", ",", "multi_instance", ":", "bool", "=", "False", ",", "stateful", ":", "bool", "=", "False", ",", "port", ":", "Optional", "[", "int", "]", "=", "None", ",", "https", ":", "bool", "=", "False", ",", "ssl_key", ":", "str", "=", "None", ",", "ssl_cert", ":", "str", "=", "None", ")", "->", "None", ":", "server_config_path", "=", "Path", "(", "get_settings_path", "(", ")", ",", "SERVER_CONFIG_FILENAME", ")", ".", "resolve", "(", ")", "server_params", "=", "read_json", "(", "server_config_path", ")", "host", "=", "server_params", "[", "'common_defaults'", "]", "[", "'host'", "]", "port", "=", "port", "or", "server_params", "[", "'common_defaults'", "]", "[", "'port'", "]", "alexa_server_params", "=", "server_params", "[", "'alexa_defaults'", "]", "alexa_server_params", "[", "'multi_instance'", "]", "=", "multi_instance", "or", "server_params", "[", "'common_defaults'", "]", "[", "'multi_instance'", "]", "alexa_server_params", "[", "'stateful'", "]", "=", "stateful", "or", "server_params", "[", "'common_defaults'", "]", "[", "'stateful'", "]", "alexa_server_params", "[", "'amazon_cert_lifetime'", "]", "=", "AMAZON_CERTIFICATE_LIFETIME", "if", "https", ":", "ssh_key_path", "=", "Path", "(", "ssl_key", "or", "server_params", "[", "'https_key_path'", "]", ")", ".", "resolve", "(", ")", "if", "not", "ssh_key_path", ".", "is_file", "(", ")", ":", "e", "=", "FileNotFoundError", "(", "'Ssh key file not found: please provide correct path in --key param or '", "'https_key_path param in server configuration file'", ")", "log", ".", "error", "(", "e", ")", "raise", "e", "ssh_cert_path", "=", "Path", "(", "ssl_cert", "or", "server_params", "[", "'https_cert_path'", "]", ")", ".", "resolve", "(", ")", "if", "not", "ssh_cert_path", ".", "is_file", "(", ")", ":", "e", "=", "FileNotFoundError", "(", "'Ssh certificate file not found: please provide correct path in --cert param or '", "'https_cert_path param in server configuration file'", ")", "log", ".", "error", "(", "e", ")", "raise", "e", "ssl_context", "=", "ssl", ".", "SSLContext", "(", "ssl", ".", "PROTOCOL_TLSv1_2", ")", "ssl_context", ".", "load_cert_chain", "(", "ssh_cert_path", ",", "ssh_key_path", ")", "else", ":", "ssl_context", "=", "None", "input_q", "=", "Queue", "(", ")", "output_q", "=", "Queue", "(", ")", "bot", "=", "Bot", "(", "agent_generator", ",", "alexa_server_params", ",", "input_q", ",", "output_q", ")", "bot", ".", "start", "(", ")", "endpoint_description", "=", "{", "'description'", ":", "'Amazon Alexa custom service endpoint'", ",", "'parameters'", ":", "[", "{", "'name'", ":", "'Signature'", ",", "'in'", ":", "'header'", ",", "'required'", ":", "'true'", ",", "'type'", ":", "'string'", ",", "'example'", ":", "'Z5H5wqd06ExFVPNfJiqhKvAFjkf+cTVodOUirucHGcEVAMO1LfvgqWUkZ/X1ITDZbI0w+SMwVkEQZlkeThbVS/54M22StNDUtfz4Ua20xNDpIPwcWIACAmZ38XxbbTEFJI5WwqrbilNcfzqiGrIPfdO5rl+/xUjHFUdcJdUY/QzBxXsceytVYfEiR9MzOCN2m4C0XnpThUavAu159KrLj8AkuzN0JF87iXv+zOEeZRgEuwmsAnJrRUwkJ4yWokEPnSVdjF0D6f6CscfyvRe9nsWShq7/zRTa41meweh+n006zvf58MbzRdXPB22RI4AN0ksWW7hSC8/QLAKQE+lvaw=='", ",", "}", ",", "{", "'name'", ":", "'Signaturecertchainurl'", ",", "'in'", ":", "'header'", ",", "'required'", ":", "'true'", ",", "'type'", ":", "'string'", ",", "'example'", ":", "'https://s3.amazonaws.com/echo.api/echo-api-cert-6-ats.pem'", ",", "}", ",", "{", "'name'", ":", "'data'", ",", "'in'", ":", "'body'", ",", "'required'", ":", "'true'", ",", "'example'", ":", "{", "'version'", ":", "'1.0'", ",", 
"'session'", ":", "{", "'new'", ":", "False", ",", "'sessionId'", ":", "'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63'", ",", "'application'", ":", "{", "'applicationId'", ":", "'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6'", "}", ",", "'attributes'", ":", "{", "'sessionId'", ":", "'amzn1.echo-api.session.3c6ebffd-55b9-4e1a-bf3c-c921c1801b63'", "}", ",", "'user'", ":", "{", "'userId'", ":", "'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ'", "}", "}", ",", "'context'", ":", "{", "'System'", ":", "{", "'application'", ":", "{", "'applicationId'", ":", "'amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6'", "}", ",", "'user'", ":", "{", "'userId'", ":", "'amzn1.ask.account.AGR4R2LOVHMNMNOGROBVNLU7CL4C57X465XJF2T2F55OUXNTLCXDQP3I55UXZIALEKKZJ6Q2MA5MEFSMZVPEL5NVZS6FZLEU444BVOLPB5WVH5CHYTQAKGD7VFLGPRFZVHHH2NIB4HKNHHGX6HM6S6QDWCKXWOIZL7ONNQSBUCVPMZQKMCYXRG5BA2POYEXFDXRXCGEVDWVSMPQ'", "}", ",", "'device'", ":", "{", "'deviceId'", ":", "'amzn1.ask.device.AFQAMLYOYQUUACSE7HFVYS4ZI2KUB35JPHQRUPKTDCAU3A47WESP5L57KSWT5L6RT3FVXWH4OA2DNPJRMZ2VGEIACF3PJEIDCOUWUBC4W5RPJNUB3ZVT22J4UJN5UL3T2UBP36RVHFJ5P4IPT2HUY3P2YOY33IOU4O33HUAG7R2BUNROEH4T2'", ",", "'supportedInterfaces'", ":", "{", "}", "}", ",", "'apiEndpoint'", ":", "'https://api.amazonalexa.com'", ",", "'apiAccessToken'", ":", "'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjhiMTdhNWRlLTM3NDktNDkxOS1hYTFmLWUwYmJhZjhhNDZhNiIsImV4cCI6MTU0NTIyMzY1OCwiaWF0IjoxNTQ1MjIwMDU4LCJuYmYiOjE1NDUyMjAwNTgsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUZRQU1MWU9ZUVVVQUNTRTdIRlZZUzRaSTJLVUIzNUpQSFFSVVBLVERDQVUzQTQ3V0VTUDVMNTdLU1dUNUw2UlQzRlZYV0g0T0EyRE5QSlJNWjJWR0VJQUNGM1BKRUlEQ09VV1VCQzRXNVJQSk5VQjNaVlQyMko0VUpONVVMM1QyVUJQMzZSVkhGSjVQNElQVDJIVVkzUDJZT1kzM0lPVTRPMzNIVUFHN1IyQlVOUk9FSDRUMiIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFHUjRSMkxPVkhNTk1OT0dST0JWTkxVN0NMNEM1N1g0NjVYSkYyVDJGNTVPVVhOVExDWERRUDNJNTVVWFpJQUxFS0taSjZRMk1BNU1FRlNNWlZQRUw1TlZaUzZGWkxFVTQ0NEJWT0xQQjVXVkg1Q0hZVFFBS0dEN1ZGTEdQUkZaVkhISDJOSUI0SEtOSEhHWDZITTZTNlFEV0NLWFdPSVpMN09OTlFTQlVDVlBNWlFLTUNZWFJHNUJBMlBPWUVYRkRYUlhDR0VWRFdWU01QUSJ9fQ.jcomYhBhU485T4uoe2NyhWnL-kZHoPQKpcycFqa-1sy_lSIitfFGup9DKrf2NkN-I9lZ3xwq9llqx9WRN78fVJjN6GLcDhBDH0irPwt3n9_V7_5bfB6KARv5ZG-JKOmZlLBqQbnln0DAJ10D8HNiytMARNEwduMBVDNK0A5z6YxtRcLYYFD2-Ieg_V8Qx90eE2pd2U5xOuIEL0pXfSoiJ8vpxb8BKwaMO47tdE4qhg_k7v8ClwyXg3EMEhZFjixYNqdW1tCrwDGj58IWMXDyzZhIlRMh6uudMOT6scSzcNVD0v42IOTZ3S_X6rG01B7xhUDlZXMqkrCuzOyqctGaPw'", "}", ",", "'Viewport'", ":", "{", "'experiences'", ":", "[", "{", "'arcMinuteWidth'", ":", "246", ",", "'arcMinuteHeight'", ":", "144", ",", "'canRotate'", ":", "False", ",", "'canResize'", ":", "False", "}", "]", ",", "'shape'", ":", "'RECTANGLE'", ",", "'pixelWidth'", ":", "1024", ",", "'pixelHeight'", ":", "600", ",", "'dpi'", ":", "160", ",", "'currentPixelWidth'", ":", "1024", ",", "'currentPixelHeight'", ":", "600", ",", "'touch'", ":", "[", "'SINGLE'", "]", "}", "}", ",", "'request'", ":", "{", "'type'", ":", "'IntentRequest'", ",", "'requestId'", ":", "'amzn1.echo-api.request.388d0f6e-04b9-4450-a687-b9abaa73ac6a'", ",", "'timestamp'", ":", "'2018-12-19T11:47:38Z'", ",", "'locale'", ":", "'en-US'", ",", "'intent'", ":", "{", "'name'", ":", "'AskDeepPavlov'", ",", 
"'confirmationStatus'", ":", "'NONE'", ",", "'slots'", ":", "{", "'raw_input'", ":", "{", "'name'", ":", "'raw_input'", ",", "'value'", ":", "'my beautiful sandbox skill'", ",", "'resolutions'", ":", "{", "'resolutionsPerAuthority'", ":", "[", "{", "'authority'", ":", "'amzn1.er-authority.echo-sdk.amzn1.ask.skill.8b17a5de-3749-4919-aa1f-e0bbaf8a46a6.GetInput'", ",", "'status'", ":", "{", "'code'", ":", "'ER_SUCCESS_NO_MATCH'", "}", "}", "]", "}", ",", "'confirmationStatus'", ":", "'NONE'", ",", "'source'", ":", "'USER'", "}", "}", "}", "}", "}", "}", "]", ",", "'responses'", ":", "{", "\"200\"", ":", "{", "\"description\"", ":", "\"A model response\"", "}", "}", "}", "@", "app", ".", "route", "(", "'/'", ")", "def", "index", "(", ")", ":", "return", "redirect", "(", "'/apidocs/'", ")", "@", "app", ".", "route", "(", "'/interact'", ",", "methods", "=", "[", "'POST'", "]", ")", "@", "swag_from", "(", "endpoint_description", ")", "def", "handle_request", "(", ")", ":", "request_body", ":", "bytes", "=", "request", ".", "get_data", "(", ")", "signature_chain_url", ":", "str", "=", "request", ".", "headers", ".", "get", "(", "'Signaturecertchainurl'", ")", "signature", ":", "str", "=", "request", ".", "headers", ".", "get", "(", "'Signature'", ")", "alexa_request", ":", "dict", "=", "request", ".", "get_json", "(", ")", "request_dict", "=", "{", "'request_body'", ":", "request_body", ",", "'signature_chain_url'", ":", "signature_chain_url", ",", "'signature'", ":", "signature", ",", "'alexa_request'", ":", "alexa_request", "}", "bot", ".", "input_queue", ".", "put", "(", "request_dict", ")", "response", ":", "dict", "=", "bot", ".", "output_queue", ".", "get", "(", ")", "response_code", "=", "400", "if", "'error'", "in", "response", ".", "keys", "(", ")", "else", "200", "return", "jsonify", "(", "response", ")", ",", "response_code", "app", ".", "run", "(", "host", "=", "host", ",", "port", "=", "port", ",", "threaded", "=", "True", ",", "ssl_context", "=", "ssl_context", ")" ]
Initiates Flask web service with Alexa skill. Args: agent_generator: Callback Alexa agents factory. multi_instance: Multi instance mode flag. stateful: Stateful mode flag. port: Flask web service port. https: Flag for running Alexa skill service in https mode. ssl_key: SSL key file path. ssl_cert: SSL certificate file path.
[ "Initiates", "Flask", "web", "service", "with", "Alexa", "skill", "." ]
python
test
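A minimal usage sketch for the run_alexa_server row above, assuming the import path matches the file shown and that build_agent is a user-supplied factory (hypothetical name) returning an agent the Bot class accepts:

from deeppavlov.utils.alexa.server import run_alexa_server

def build_agent():
    # Hypothetical factory: construct and return the DeepPavlov agent here.
    raise NotImplementedError

run_alexa_server(agent_generator=build_agent,
                 multi_instance=False,
                 stateful=False,
                 port=5000,
                 https=False)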
nerdvegas/rez
src/rez/rex.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/rex.py#L783-L816
def split(self, delimiter=None): """Same as string.split(), but retains literal/expandable structure. Returns: List of `EscapedString`. """ result = [] strings = self.strings[:] current = None while strings: is_literal, value = strings[0] parts = value.split(delimiter, 1) if len(parts) > 1: value1, value2 = parts strings[0] = (is_literal, value2) out = EscapedString(value1, is_literal) push = True else: strings = strings[1:] out = EscapedString(value, is_literal) push = False if current is None: current = out else: current = current + out if push: result.append(current) current = None if current: result.append(current) return result
[ "def", "split", "(", "self", ",", "delimiter", "=", "None", ")", ":", "result", "=", "[", "]", "strings", "=", "self", ".", "strings", "[", ":", "]", "current", "=", "None", "while", "strings", ":", "is_literal", ",", "value", "=", "strings", "[", "0", "]", "parts", "=", "value", ".", "split", "(", "delimiter", ",", "1", ")", "if", "len", "(", "parts", ")", ">", "1", ":", "value1", ",", "value2", "=", "parts", "strings", "[", "0", "]", "=", "(", "is_literal", ",", "value2", ")", "out", "=", "EscapedString", "(", "value1", ",", "is_literal", ")", "push", "=", "True", "else", ":", "strings", "=", "strings", "[", "1", ":", "]", "out", "=", "EscapedString", "(", "value", ",", "is_literal", ")", "push", "=", "False", "if", "current", "is", "None", ":", "current", "=", "out", "else", ":", "current", "=", "current", "+", "out", "if", "push", ":", "result", ".", "append", "(", "current", ")", "current", "=", "None", "if", "current", ":", "result", ".", "append", "(", "current", ")", "return", "result" ]
Same as string.split(), but retains literal/expandable structure. Returns: List of `EscapedString`.
[ "Same", "as", "string", ".", "split", "()", "but", "retains", "literal", "/", "expandable", "structure", "." ]
python
train
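A small, hedged check of the split() behaviour documented above, assuming EscapedString can be imported from rez.rex and constructed directly from a plain string:

from rez.rex import EscapedString

parts = EscapedString("foo bar baz").split()
assert len(parts) == 3          # mirrors "foo bar baz".split()
parts = EscapedString("a:b:c").split(":")
assert len(parts) == 3          # explicit delimiter works like str.split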
deployed/django-emailtemplates
emailtemplates/registry.py
https://github.com/deployed/django-emailtemplates/blob/0e95139989dbcf7e624153ddcd7b5b66b48eb6eb/emailtemplates/registry.py#L137-L145
def get_form_help_text(self, path): """ Returns text that can be used as form help text for creating email templates. """ try: form_help_text = self.get_registration(path).as_form_help_text() except NotRegistered: form_help_text = u"" return form_help_text
[ "def", "get_form_help_text", "(", "self", ",", "path", ")", ":", "try", ":", "form_help_text", "=", "self", ".", "get_registration", "(", "path", ")", ".", "as_form_help_text", "(", ")", "except", "NotRegistered", ":", "form_help_text", "=", "u\"\"", "return", "form_help_text" ]
Returns text that can be used as form help text for creating email templates.
[ "Returns", "text", "that", "can", "be", "used", "as", "form", "help", "text", "for", "creating", "email", "templates", "." ]
python
train
pandas-dev/pandas
pandas/core/indexes/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3823-L3838
def _coerce_scalar_to_index(self, item): """ We need to coerce a scalar to a compat for our index type. Parameters ---------- item : scalar item to coerce """ dtype = self.dtype if self._is_numeric_dtype and isna(item): # We can't coerce to the numeric dtype of "self" (unless # it's float) if there are NaN values in our output. dtype = None return Index([item], dtype=dtype, **self._get_attributes_dict())
[ "def", "_coerce_scalar_to_index", "(", "self", ",", "item", ")", ":", "dtype", "=", "self", ".", "dtype", "if", "self", ".", "_is_numeric_dtype", "and", "isna", "(", "item", ")", ":", "# We can't coerce to the numeric dtype of \"self\" (unless", "# it's float) if there are NaN values in our output.", "dtype", "=", "None", "return", "Index", "(", "[", "item", "]", ",", "dtype", "=", "dtype", ",", "*", "*", "self", ".", "_get_attributes_dict", "(", ")", ")" ]
We need to coerce a scalar to a compat for our index type. Parameters ---------- item : scalar item to coerce
[ "We", "need", "to", "coerce", "a", "scalar", "to", "a", "compat", "for", "our", "index", "type", "." ]
python
train
fermiPy/fermipy
fermipy/wcs_utils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/wcs_utils.py#L277-L300
def pix_to_skydir(xpix, ypix, wcs): """Convert pixel coordinates to a skydir object. Gracefully handles 0-d coordinate arrays. Always returns a celestial coordinate. Parameters ---------- xpix : `numpy.ndarray` ypix : `numpy.ndarray` wcs : `~astropy.wcs.WCS` """ xpix = np.array(xpix) ypix = np.array(ypix) if xpix.ndim > 0 and len(xpix) == 0: return SkyCoord(np.empty(0), np.empty(0), unit='deg', frame='icrs') return SkyCoord.from_pixel(xpix, ypix, wcs, origin=0).transform_to('icrs')
[ "def", "pix_to_skydir", "(", "xpix", ",", "ypix", ",", "wcs", ")", ":", "xpix", "=", "np", ".", "array", "(", "xpix", ")", "ypix", "=", "np", ".", "array", "(", "ypix", ")", "if", "xpix", ".", "ndim", ">", "0", "and", "len", "(", "xpix", ")", "==", "0", ":", "return", "SkyCoord", "(", "np", ".", "empty", "(", "0", ")", ",", "np", ".", "empty", "(", "0", ")", ",", "unit", "=", "'deg'", ",", "frame", "=", "'icrs'", ")", "return", "SkyCoord", ".", "from_pixel", "(", "xpix", ",", "ypix", ",", "wcs", ",", "origin", "=", "0", ")", ".", "transform_to", "(", "'icrs'", ")" ]
Convert pixel coordinates to a skydir object. Gracefully handles 0-d coordinate arrays. Always returns a celestial coordinate. Parameters ---------- xpix : `numpy.ndarray` ypix : `numpy.ndarray` wcs : `~astropy.wcs.WCS`
[ "Convert", "pixel", "coordinates", "to", "a", "skydir", "object", "." ]
python
train
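An illustrative call for the pix_to_skydir row, assuming fermipy and astropy are installed; the bare TAN projection below exists only to make the call self-contained:

import numpy as np
from astropy.wcs import WCS
from fermipy.wcs_utils import pix_to_skydir

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crval = [83.63, 22.01]      # arbitrary reference point (deg)
wcs.wcs.crpix = [100.0, 100.0]
wcs.wcs.cdelt = [-0.01, 0.01]

coords = pix_to_skydir(np.array([100.0, 150.0]), np.array([100.0, 120.0]), wcs)
print(coords)   # ICRS SkyCoord for each pixel pair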
rootpy/rootpy
rootpy/plotting/base.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/base.py#L376-L385
def SetMarkerColor(self, color): """ *color* may be any color understood by ROOT or matplotlib. For full documentation of accepted *color* arguments, see :class:`rootpy.plotting.style.Color`. """ self._markercolor = Color(color) if isinstance(self, ROOT.TAttMarker): ROOT.TAttMarker.SetMarkerColor(self, self._markercolor('root'))
[ "def", "SetMarkerColor", "(", "self", ",", "color", ")", ":", "self", ".", "_markercolor", "=", "Color", "(", "color", ")", "if", "isinstance", "(", "self", ",", "ROOT", ".", "TAttMarker", ")", ":", "ROOT", ".", "TAttMarker", ".", "SetMarkerColor", "(", "self", ",", "self", ".", "_markercolor", "(", "'root'", ")", ")" ]
*color* may be any color understood by ROOT or matplotlib. For full documentation of accepted *color* arguments, see :class:`rootpy.plotting.style.Color`.
[ "*", "color", "*", "may", "be", "any", "color", "understood", "by", "ROOT", "or", "matplotlib", "." ]
python
train
marshallward/f90nml
f90nml/namelist.py
https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L179-L198
def indent(self, value): """Validate and set the indent width.""" # Explicit indent setting if isinstance(value, str): if value.isspace() or len(value) == 0: self._indent = value else: raise ValueError('String indentation can only contain ' 'whitespace.') # Set indent width elif isinstance(value, int): if value >= 0: self._indent = value * ' ' else: raise ValueError('Indentation spacing must be nonnegative.') else: raise TypeError('Indentation must be specified by string or space ' 'width.')
[ "def", "indent", "(", "self", ",", "value", ")", ":", "# Explicit indent setting", "if", "isinstance", "(", "value", ",", "str", ")", ":", "if", "value", ".", "isspace", "(", ")", "or", "len", "(", "value", ")", "==", "0", ":", "self", ".", "_indent", "=", "value", "else", ":", "raise", "ValueError", "(", "'String indentation can only contain '", "'whitespace.'", ")", "# Set indent width", "elif", "isinstance", "(", "value", ",", "int", ")", ":", "if", "value", ">=", "0", ":", "self", ".", "_indent", "=", "value", "*", "' '", "else", ":", "raise", "ValueError", "(", "'Indentation spacing must be nonnegative.'", ")", "else", ":", "raise", "TypeError", "(", "'Indentation must be specified by string or space '", "'width.'", ")" ]
Validate and set the indent width.
[ "Validate", "and", "set", "the", "indent", "width", "." ]
python
train
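A short usage sketch for the indent setter above; it assumes the f90nml package, where Namelist objects expose this property when writing namelist files:

import f90nml

nml = f90nml.Namelist({'config_nml': {'steps': 10, 'dt': 0.5}})
nml.indent = 4            # integer: four spaces
nml.indent = '\t'         # or an explicit whitespace string
# nml.indent = 'xx'       # would raise ValueError (non-whitespace string)
# nml.indent = -1         # would raise ValueError (negative width)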
mental32/spotify.py
spotify/models/player.py
https://github.com/mental32/spotify.py/blob/bb296cac7c3dd289908906b7069bd80f43950515/spotify/models/player.py#L85-L94
async def resume(self, *, device: Optional[SomeDevice] = None): """Resume playback on the user's account. Parameters ---------- device : Optional[:obj:`SomeDevice`] The Device object or id of the device this command is targeting. If not supplied, the user’s currently active device is the target. """ await self._user.http.play_playback(None, device_id=str(device))
[ "async", "def", "resume", "(", "self", ",", "*", ",", "device", ":", "Optional", "[", "SomeDevice", "]", "=", "None", ")", ":", "await", "self", ".", "_user", ".", "http", ".", "play_playback", "(", "None", ",", "device_id", "=", "str", "(", "device", ")", ")" ]
Resume playback on the user's account. Parameters ---------- device : Optional[:obj:`SomeDevice`] The Device object or id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
[ "Resume", "playback", "on", "the", "user", "s", "account", "." ]
python
test
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L310-L326
def plot_file(self, name: str=None, time: int=None) -> None: """ Plot specific time for provided datafile. If no time provided, will plot middle. :param: savefile name :param: time/data column """ if not time: time = int(len(self.times) / 2) if not name: name = './img/' + self.filename + '.png' yhat, residuals, residual_mean, noise = self._get_fit(time) plt.figure() plt.scatter(self.domain, self.averagedata[:, time], alpha=0.2) plt.plot(yhat) plt.savefig(name)
[ "def", "plot_file", "(", "self", ",", "name", ":", "str", "=", "None", ",", "time", ":", "int", "=", "None", ")", "->", "None", ":", "if", "not", "time", ":", "time", "=", "int", "(", "len", "(", "self", ".", "times", ")", "/", "2", ")", "if", "not", "name", ":", "name", "=", "'./img/'", "+", "self", ".", "filename", "+", "'.png'", "yhat", ",", "residuals", ",", "residual_mean", ",", "noise", "=", "self", ".", "_get_fit", "(", "time", ")", "plt", ".", "figure", "(", ")", "plt", ".", "scatter", "(", "self", ".", "domain", ",", "self", ".", "averagedata", "[", ":", ",", "time", "]", ",", "alpha", "=", "0.2", ")", "plt", ".", "plot", "(", "yhat", ")", "plt", ".", "savefig", "(", "name", ")" ]
Plot specific time for provided datafile. If no time provided, will plot middle. :param: savefile name :param: time/data column
[ "Plot", "specific", "time", "for", "provided", "datafile", ".", "If", "no", "time", "provided", "will", "plot", "middle", "." ]
python
train
scoutapp/scout_apm_python
src/scout_apm/django/middleware.py
https://github.com/scoutapp/scout_apm_python/blob/e5539ee23b8129be9b75d5007c88b6158b51294f/src/scout_apm/django/middleware.py#L67-L84
def process_view(self, request, view_func, view_args, view_kwargs): """ Capture details about the view_func that is about to execute """ try: if ignore_path(request.path): TrackedRequest.instance().tag("ignore_transaction", True) view_name = request.resolver_match._func_path span = TrackedRequest.instance().current_span() if span is not None: span.operation = "Controller/" + view_name Context.add("path", request.path) Context.add("user_ip", RemoteIp.lookup_from_headers(request.META)) if getattr(request, "user", None) is not None: Context.add("username", request.user.get_username()) except Exception: pass
[ "def", "process_view", "(", "self", ",", "request", ",", "view_func", ",", "view_args", ",", "view_kwargs", ")", ":", "try", ":", "if", "ignore_path", "(", "request", ".", "path", ")", ":", "TrackedRequest", ".", "instance", "(", ")", ".", "tag", "(", "\"ignore_transaction\"", ",", "True", ")", "view_name", "=", "request", ".", "resolver_match", ".", "_func_path", "span", "=", "TrackedRequest", ".", "instance", "(", ")", ".", "current_span", "(", ")", "if", "span", "is", "not", "None", ":", "span", ".", "operation", "=", "\"Controller/\"", "+", "view_name", "Context", ".", "add", "(", "\"path\"", ",", "request", ".", "path", ")", "Context", ".", "add", "(", "\"user_ip\"", ",", "RemoteIp", ".", "lookup_from_headers", "(", "request", ".", "META", ")", ")", "if", "getattr", "(", "request", ",", "\"user\"", ",", "None", ")", "is", "not", "None", ":", "Context", ".", "add", "(", "\"username\"", ",", "request", ".", "user", ".", "get_username", "(", ")", ")", "except", "Exception", ":", "pass" ]
Capture details about the view_func that is about to execute
[ "Capture", "details", "about", "the", "view_func", "that", "is", "about", "to", "execute" ]
python
train
apache/airflow
airflow/contrib/utils/sendgrid.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/utils/sendgrid.py#L33-L102
def send_email(to, subject, html_content, files=None, dryrun=False, cc=None, bcc=None, mime_subtype='mixed', sandbox_mode=False, **kwargs): """ Send an email with html content using sendgrid. To use this plugin: 0. include sendgrid subpackage as part of your Airflow installation, e.g., pip install 'apache-airflow[sendgrid]' 1. update [email] backend in airflow.cfg, i.e., [email] email_backend = airflow.contrib.utils.sendgrid.send_email 2. configure Sendgrid specific environment variables at all Airflow instances: SENDGRID_MAIL_FROM={your-mail-from} SENDGRID_API_KEY={your-sendgrid-api-key}. """ if files is None: files = [] mail = Mail() from_email = kwargs.get('from_email') or os.environ.get('SENDGRID_MAIL_FROM') from_name = kwargs.get('from_name') or os.environ.get('SENDGRID_MAIL_SENDER') mail.from_email = Email(from_email, from_name) mail.subject = subject mail.mail_settings = MailSettings() if sandbox_mode: mail.mail_settings.sandbox_mode = SandBoxMode(enable=True) # Add the recipient list of to emails. personalization = Personalization() to = get_email_address_list(to) for to_address in to: personalization.add_to(Email(to_address)) if cc: cc = get_email_address_list(cc) for cc_address in cc: personalization.add_cc(Email(cc_address)) if bcc: bcc = get_email_address_list(bcc) for bcc_address in bcc: personalization.add_bcc(Email(bcc_address)) # Add custom_args to personalization if present pers_custom_args = kwargs.get('personalization_custom_args', None) if isinstance(pers_custom_args, dict): for key in pers_custom_args.keys(): personalization.add_custom_arg(CustomArg(key, pers_custom_args[key])) mail.add_personalization(personalization) mail.add_content(Content('text/html', html_content)) categories = kwargs.get('categories', []) for cat in categories: mail.add_category(Category(cat)) # Add email attachment. for fname in files: basename = os.path.basename(fname) attachment = Attachment() attachment.type = mimetypes.guess_type(basename)[0] attachment.filename = basename attachment.disposition = "attachment" attachment.content_id = '<{0}>'.format(basename) with open(fname, "rb") as f: attachment.content = base64.b64encode(f.read()).decode('utf-8') mail.add_attachment(attachment) _post_sendgrid_mail(mail.get())
[ "def", "send_email", "(", "to", ",", "subject", ",", "html_content", ",", "files", "=", "None", ",", "dryrun", "=", "False", ",", "cc", "=", "None", ",", "bcc", "=", "None", ",", "mime_subtype", "=", "'mixed'", ",", "sandbox_mode", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "files", "is", "None", ":", "files", "=", "[", "]", "mail", "=", "Mail", "(", ")", "from_email", "=", "kwargs", ".", "get", "(", "'from_email'", ")", "or", "os", ".", "environ", ".", "get", "(", "'SENDGRID_MAIL_FROM'", ")", "from_name", "=", "kwargs", ".", "get", "(", "'from_name'", ")", "or", "os", ".", "environ", ".", "get", "(", "'SENDGRID_MAIL_SENDER'", ")", "mail", ".", "from_email", "=", "Email", "(", "from_email", ",", "from_name", ")", "mail", ".", "subject", "=", "subject", "mail", ".", "mail_settings", "=", "MailSettings", "(", ")", "if", "sandbox_mode", ":", "mail", ".", "mail_settings", ".", "sandbox_mode", "=", "SandBoxMode", "(", "enable", "=", "True", ")", "# Add the recipient list of to emails.", "personalization", "=", "Personalization", "(", ")", "to", "=", "get_email_address_list", "(", "to", ")", "for", "to_address", "in", "to", ":", "personalization", ".", "add_to", "(", "Email", "(", "to_address", ")", ")", "if", "cc", ":", "cc", "=", "get_email_address_list", "(", "cc", ")", "for", "cc_address", "in", "cc", ":", "personalization", ".", "add_cc", "(", "Email", "(", "cc_address", ")", ")", "if", "bcc", ":", "bcc", "=", "get_email_address_list", "(", "bcc", ")", "for", "bcc_address", "in", "bcc", ":", "personalization", ".", "add_bcc", "(", "Email", "(", "bcc_address", ")", ")", "# Add custom_args to personalization if present", "pers_custom_args", "=", "kwargs", ".", "get", "(", "'personalization_custom_args'", ",", "None", ")", "if", "isinstance", "(", "pers_custom_args", ",", "dict", ")", ":", "for", "key", "in", "pers_custom_args", ".", "keys", "(", ")", ":", "personalization", ".", "add_custom_arg", "(", "CustomArg", "(", "key", ",", "pers_custom_args", "[", "key", "]", ")", ")", "mail", ".", "add_personalization", "(", "personalization", ")", "mail", ".", "add_content", "(", "Content", "(", "'text/html'", ",", "html_content", ")", ")", "categories", "=", "kwargs", ".", "get", "(", "'categories'", ",", "[", "]", ")", "for", "cat", "in", "categories", ":", "mail", ".", "add_category", "(", "Category", "(", "cat", ")", ")", "# Add email attachment.", "for", "fname", "in", "files", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "fname", ")", "attachment", "=", "Attachment", "(", ")", "attachment", ".", "type", "=", "mimetypes", ".", "guess_type", "(", "basename", ")", "[", "0", "]", "attachment", ".", "filename", "=", "basename", "attachment", ".", "disposition", "=", "\"attachment\"", "attachment", ".", "content_id", "=", "'<{0}>'", ".", "format", "(", "basename", ")", "with", "open", "(", "fname", ",", "\"rb\"", ")", "as", "f", ":", "attachment", ".", "content", "=", "base64", ".", "b64encode", "(", "f", ".", "read", "(", ")", ")", ".", "decode", "(", "'utf-8'", ")", "mail", ".", "add_attachment", "(", "attachment", ")", "_post_sendgrid_mail", "(", "mail", ".", "get", "(", ")", ")" ]
Send an email with html content using sendgrid. To use this plugin: 0. include sendgrid subpackage as part of your Airflow installation, e.g., pip install 'apache-airflow[sendgrid]' 1. update [email] backend in airflow.cfg, i.e., [email] email_backend = airflow.contrib.utils.sendgrid.send_email 2. configure Sendgrid specific environment variables at all Airflow instances: SENDGRID_MAIL_FROM={your-mail-from} SENDGRID_API_KEY={your-sendgrid-api-key}.
[ "Send", "an", "email", "with", "html", "content", "using", "sendgrid", "." ]
python
test
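A hedged usage sketch for the send_email row, assuming the [email] backend and the SENDGRID_MAIL_FROM / SENDGRID_API_KEY environment variables mentioned in the docstring are already configured; recipients, content, and the attachment path below are placeholders:

from airflow.contrib.utils.sendgrid import send_email

send_email(
    to=['data-team@example.com'],
    subject='Nightly DAG finished',
    html_content='<p>All tasks succeeded.</p>',
    cc=['oncall@example.com'],
    files=['/tmp/run_report.csv'],      # attached and base64-encoded
    categories=['airflow'],             # picked up from **kwargs
    sandbox_mode=False,
)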
indico/indico-plugins
piwik/indico_piwik/piwik.py
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/piwik/indico_piwik/piwik.py#L54-L66
def get_query(self, query_params=None): """Return a query string""" if query_params is None: query_params = {} query = '' query_params['idSite'] = self.site_id if self.api_token is not None: query_params['token_auth'] = self.api_token for key, value in query_params.iteritems(): if isinstance(value, list): value = ','.join(value) query += '{}={}&'.format(str(key), str(value)) return query[:-1]
[ "def", "get_query", "(", "self", ",", "query_params", "=", "None", ")", ":", "if", "query_params", "is", "None", ":", "query_params", "=", "{", "}", "query", "=", "''", "query_params", "[", "'idSite'", "]", "=", "self", ".", "site_id", "if", "self", ".", "api_token", "is", "not", "None", ":", "query_params", "[", "'token_auth'", "]", "=", "self", ".", "api_token", "for", "key", ",", "value", "in", "query_params", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "','", ".", "join", "(", "value", ")", "query", "+=", "'{}={}&'", ".", "format", "(", "str", "(", "key", ")", ",", "str", "(", "value", ")", ")", "return", "query", "[", ":", "-", "1", "]" ]
Return a query string
[ "Return", "a", "query", "string" ]
python
train
pyviz/holoviews
holoviews/core/data/grid.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/data/grid.py#L207-L239
def coords(cls, dataset, dim, ordered=False, expanded=False, edges=False): """ Returns the coordinates along a dimension. Ordered ensures coordinates are in ascending order and expanded creates ND-array matching the dimensionality of the dataset. """ dim = dataset.get_dimension(dim, strict=True) irregular = cls.irregular(dataset, dim) if irregular or expanded: if irregular: data = dataset.data[dim.name] else: data = util.expand_grid_coords(dataset, dim) if edges and data.shape == dataset.data[dataset.vdims[0].name].shape: data = cls._infer_interval_breaks(data, axis=1) data = cls._infer_interval_breaks(data, axis=0) return data data = dataset.data[dim.name] if ordered and np.all(data[1:] < data[:-1]): data = data[::-1] shape = cls.shape(dataset, True) if dim in dataset.kdims: idx = dataset.get_dimension_index(dim) isedges = (dim in dataset.kdims and len(shape) == dataset.ndims and len(data) == (shape[dataset.ndims-idx-1]+1)) else: isedges = False if edges and not isedges: data = cls._infer_interval_breaks(data) elif not edges and isedges: data = data[:-1] + np.diff(data)/2. return data
[ "def", "coords", "(", "cls", ",", "dataset", ",", "dim", ",", "ordered", "=", "False", ",", "expanded", "=", "False", ",", "edges", "=", "False", ")", ":", "dim", "=", "dataset", ".", "get_dimension", "(", "dim", ",", "strict", "=", "True", ")", "irregular", "=", "cls", ".", "irregular", "(", "dataset", ",", "dim", ")", "if", "irregular", "or", "expanded", ":", "if", "irregular", ":", "data", "=", "dataset", ".", "data", "[", "dim", ".", "name", "]", "else", ":", "data", "=", "util", ".", "expand_grid_coords", "(", "dataset", ",", "dim", ")", "if", "edges", "and", "data", ".", "shape", "==", "dataset", ".", "data", "[", "dataset", ".", "vdims", "[", "0", "]", ".", "name", "]", ".", "shape", ":", "data", "=", "cls", ".", "_infer_interval_breaks", "(", "data", ",", "axis", "=", "1", ")", "data", "=", "cls", ".", "_infer_interval_breaks", "(", "data", ",", "axis", "=", "0", ")", "return", "data", "data", "=", "dataset", ".", "data", "[", "dim", ".", "name", "]", "if", "ordered", "and", "np", ".", "all", "(", "data", "[", "1", ":", "]", "<", "data", "[", ":", "-", "1", "]", ")", ":", "data", "=", "data", "[", ":", ":", "-", "1", "]", "shape", "=", "cls", ".", "shape", "(", "dataset", ",", "True", ")", "if", "dim", "in", "dataset", ".", "kdims", ":", "idx", "=", "dataset", ".", "get_dimension_index", "(", "dim", ")", "isedges", "=", "(", "dim", "in", "dataset", ".", "kdims", "and", "len", "(", "shape", ")", "==", "dataset", ".", "ndims", "and", "len", "(", "data", ")", "==", "(", "shape", "[", "dataset", ".", "ndims", "-", "idx", "-", "1", "]", "+", "1", ")", ")", "else", ":", "isedges", "=", "False", "if", "edges", "and", "not", "isedges", ":", "data", "=", "cls", ".", "_infer_interval_breaks", "(", "data", ")", "elif", "not", "edges", "and", "isedges", ":", "data", "=", "data", "[", ":", "-", "1", "]", "+", "np", ".", "diff", "(", "data", ")", "/", "2.", "return", "data" ]
Returns the coordinates along a dimension. Ordered ensures coordinates are in ascending order and expanded creates ND-array matching the dimensionality of the dataset.
[ "Returns", "the", "coordinates", "along", "a", "dimension", ".", "Ordered", "ensures", "coordinates", "are", "in", "ascending", "order", "and", "expanded", "creates", "ND", "-", "array", "matching", "the", "dimensionality", "of", "the", "dataset", "." ]
python
train
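A hedged sketch of how the coords classmethod above can be exercised, assuming holoviews selects its grid interface for gridded tuple data:

import numpy as np
import holoviews as hv

xs = np.array([0.0, 1.0, 2.0])
ys = np.array([0.0, 10.0])
zs = np.random.rand(2, 3)                        # shape (len(ys), len(xs))
ds = hv.Dataset((xs, ys, zs), ['x', 'y'], 'z')

print(ds.interface.coords(ds, 'x'))              # raw x coordinates
print(ds.interface.coords(ds, 'x', edges=True))  # bin edges, one element longer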
sebdah/dynamic-dynamodb
dynamic_dynamodb/statistics/table.py
https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/statistics/table.py#L291-L332
def get_throttled_by_consumed_write_percent( table_name, lookback_window_start=15, lookback_period=5): """ Returns the number of throttled write events in percent of consumption :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Percent of throttled write events by consumption """ try: metrics1 = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ConsumedWriteCapacityUnits') metrics2 = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'WriteThrottleEvents') except BotoServerError: raise if metrics1 and metrics2: lookback_seconds = lookback_period * 60 throttled_by_consumed_write_percent = ( ( (float(metrics2[0]['Sum']) / float(lookback_seconds)) / (float(metrics1[0]['Sum']) / float(lookback_seconds)) ) * 100) else: throttled_by_consumed_write_percent = 0 logger.info( '{0} - Throttled write percent by consumption: {1:.2f}%'.format( table_name, throttled_by_consumed_write_percent)) return throttled_by_consumed_write_percent
[ "def", "get_throttled_by_consumed_write_percent", "(", "table_name", ",", "lookback_window_start", "=", "15", ",", "lookback_period", "=", "5", ")", ":", "try", ":", "metrics1", "=", "__get_aws_metric", "(", "table_name", ",", "lookback_window_start", ",", "lookback_period", ",", "'ConsumedWriteCapacityUnits'", ")", "metrics2", "=", "__get_aws_metric", "(", "table_name", ",", "lookback_window_start", ",", "lookback_period", ",", "'WriteThrottleEvents'", ")", "except", "BotoServerError", ":", "raise", "if", "metrics1", "and", "metrics2", ":", "lookback_seconds", "=", "lookback_period", "*", "60", "throttled_by_consumed_write_percent", "=", "(", "(", "(", "float", "(", "metrics2", "[", "0", "]", "[", "'Sum'", "]", ")", "/", "float", "(", "lookback_seconds", ")", ")", "/", "(", "float", "(", "metrics1", "[", "0", "]", "[", "'Sum'", "]", ")", "/", "float", "(", "lookback_seconds", ")", ")", ")", "*", "100", ")", "else", ":", "throttled_by_consumed_write_percent", "=", "0", "logger", ".", "info", "(", "'{0} - Throttled write percent by consumption: {1:.2f}%'", ".", "format", "(", "table_name", ",", "throttled_by_consumed_write_percent", ")", ")", "return", "throttled_by_consumed_write_percent" ]
Returns the number of throttled write events in percent of consumption :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Percent of throttled write events by consumption
[ "Returns", "the", "number", "of", "throttled", "write", "events", "in", "percent", "of", "consumption" ]
python
train
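One detail of the formula in the row above is easy to miss: both sums are divided by the same lookback_seconds, so the window length cancels and the result is simply throttled write events as a percentage of consumed write units. A tiny worked example with made-up numbers:

lookback_seconds = 5 * 60
consumed_sum = 3000.0     # ConsumedWriteCapacityUnits over the window
throttled_sum = 12.0      # WriteThrottleEvents over the window

percent = ((throttled_sum / lookback_seconds) /
           (consumed_sum / lookback_seconds)) * 100
print(round(percent, 6))  # 0.4, same as throttled_sum / consumed_sum * 100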
edx/edx-organizations
organizations/data.py
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L198-L217
def create_organization_course(organization, course_key): """ Inserts a new organization-course relationship into app/local state No response currently defined for this operation """ organization_obj = serializers.deserialize_organization(organization) try: relationship = internal.OrganizationCourse.objects.get( organization=organization_obj, course_id=text_type(course_key) ) # If the relationship exists, but was inactivated, we can simply turn it back on if not relationship.active: _activate_organization_course_relationship(relationship) except internal.OrganizationCourse.DoesNotExist: relationship = internal.OrganizationCourse.objects.create( organization=organization_obj, course_id=text_type(course_key), active=True )
[ "def", "create_organization_course", "(", "organization", ",", "course_key", ")", ":", "organization_obj", "=", "serializers", ".", "deserialize_organization", "(", "organization", ")", "try", ":", "relationship", "=", "internal", ".", "OrganizationCourse", ".", "objects", ".", "get", "(", "organization", "=", "organization_obj", ",", "course_id", "=", "text_type", "(", "course_key", ")", ")", "# If the relationship exists, but was inactivated, we can simply turn it back on", "if", "not", "relationship", ".", "active", ":", "_activate_organization_course_relationship", "(", "relationship", ")", "except", "internal", ".", "OrganizationCourse", ".", "DoesNotExist", ":", "relationship", "=", "internal", ".", "OrganizationCourse", ".", "objects", ".", "create", "(", "organization", "=", "organization_obj", ",", "course_id", "=", "text_type", "(", "course_key", ")", ",", "active", "=", "True", ")" ]
Inserts a new organization-course relationship into app/local state No response currently defined for this operation
[ "Inserts", "a", "new", "organization", "-", "course", "relationship", "into", "app", "/", "local", "state", "No", "response", "currently", "defined", "for", "this", "operation" ]
python
valid
seleniumbase/SeleniumBase
seleniumbase/core/mysql.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/core/mysql.py#L48-L55
def query_fetch_one(self, query, values): """ Executes a db query, gets the first value, and closes the connection. """ self.cursor.execute(query, values) retval = self.cursor.fetchone() self.__close_db() return retval
[ "def", "query_fetch_one", "(", "self", ",", "query", ",", "values", ")", ":", "self", ".", "cursor", ".", "execute", "(", "query", ",", "values", ")", "retval", "=", "self", ".", "cursor", ".", "fetchone", "(", ")", "self", ".", "__close_db", "(", ")", "return", "retval" ]
Executes a db query, gets the first value, and closes the connection.
[ "Executes", "a", "db", "query", "gets", "the", "first", "value", "and", "closes", "the", "connection", "." ]
python
train
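A hedged usage sketch for query_fetch_one, assuming the surrounding class in seleniumbase/core/mysql.py is the DatabaseManager connection wrapper with a usable default configuration, and that a test_results table exists (both are assumptions for illustration):

from seleniumbase.core.mysql import DatabaseManager

db = DatabaseManager()   # assumed to pick up the configured MySQL connection
row = db.query_fetch_one(
    "SELECT status FROM test_results WHERE id = %(id)s",
    {"id": 42})
print(row)   # first matching row; the connection is closed afterwards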
mosdef-hub/mbuild
mbuild/compound.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/compound.py#L1205-L1229
def update_coordinates(self, filename, update_port_locations=True): """Update the coordinates of this Compound from a file. Parameters ---------- filename : str Name of file from which to load coordinates. Supported file types are the same as those supported by load() update_port_locations : bool, optional, default=True Update the locations of Ports so that they are shifted along with their anchor particles. Note: This conserves the location of Ports with respect to the anchor Particle, but does not conserve the orientation of Ports with respect to the molecule as a whole. See Also -------- load : Load coordinates from a file """ if update_port_locations: xyz_init = self.xyz self = load(filename, compound=self, coords_only=True) self._update_port_locations(xyz_init) else: self = load(filename, compound=self, coords_only=True)
[ "def", "update_coordinates", "(", "self", ",", "filename", ",", "update_port_locations", "=", "True", ")", ":", "if", "update_port_locations", ":", "xyz_init", "=", "self", ".", "xyz", "self", "=", "load", "(", "filename", ",", "compound", "=", "self", ",", "coords_only", "=", "True", ")", "self", ".", "_update_port_locations", "(", "xyz_init", ")", "else", ":", "self", "=", "load", "(", "filename", ",", "compound", "=", "self", ",", "coords_only", "=", "True", ")" ]
Update the coordinates of this Compound from a file. Parameters ---------- filename : str Name of file from which to load coordinates. Supported file types are the same as those supported by load() update_port_locations : bool, optional, default=True Update the locations of Ports so that they are shifted along with their anchor particles. Note: This conserves the location of Ports with respect to the anchor Particle, but does not conserve the orientation of Ports with respect to the molecule as a whole. See Also -------- load : Load coordinates from a file
[ "Update", "the", "coordinates", "of", "this", "Compound", "from", "a", "file", "." ]
python
train
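An illustrative use of update_coordinates, assuming mbuild is installed; the file names are placeholders for two structure files that share the same topology:

import mbuild as mb

compound = mb.load('ethane.pdb')                      # original coordinates
compound.update_coordinates('ethane_minimized.pdb')   # ports shift with their anchors
compound.update_coordinates('ethane_minimized.pdb',
                            update_port_locations=False)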
inasafe/inasafe
safe/gui/tools/wizard/step_kw15_layermode.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw15_layermode.py#L107-L152
def set_widgets(self): """Set widgets on the LayerMode tab.""" self.clear_further_steps() # Set widgets purpose = self.parent.step_kw_purpose.selected_purpose() subcategory = self.parent.step_kw_subcategory.selected_subcategory() layer_mode_question = ( layer_mode_raster_question if is_raster_layer(self.parent.layer) else layer_mode_vector_question) self.lblDescribeLayerMode.setText('') self.lstLayerModes.clear() layer_modes = get_layer_modes(subcategory['key']) if is_raster_layer(self.parent.layer): layer_mode_question = layer_mode_raster_question else: if len(layer_modes) == 2: layer_mode_question = layer_mode_vector_question elif len(layer_modes) == 1: if layer_modes[0]['key'] == 'classified': layer_mode_question = layer_mode_vector_classified_confirm elif layer_modes[0]['key'] == 'continuous': layer_mode_question = layer_mode_vector_continuous_confirm else: layer_mode_question = layer_mode_vector_question self.lblSelectLayerMode.setText( layer_mode_question % (subcategory['name'], purpose['name'])) for layer_mode in layer_modes: item = QListWidgetItem(layer_mode['name'], self.lstLayerModes) item.setData(QtCore.Qt.UserRole, layer_mode['key']) self.lstLayerModes.addItem(item) # Set value to existing keyword or default value layer_mode_keys = [m['key'] for m in layer_modes] layer_mode_keyword = self.parent.get_existing_keyword('layer_mode') if layer_mode_keyword in layer_mode_keys: index = layer_mode_keys.index(layer_mode_keyword) elif layer_mode_continuous['key'] in layer_mode_keys: # Set default value index = layer_mode_keys.index(layer_mode_continuous['key']) else: index = -1 self.lstLayerModes.setCurrentRow(index) self.auto_select_one_item(self.lstLayerModes)
[ "def", "set_widgets", "(", "self", ")", ":", "self", ".", "clear_further_steps", "(", ")", "# Set widgets", "purpose", "=", "self", ".", "parent", ".", "step_kw_purpose", ".", "selected_purpose", "(", ")", "subcategory", "=", "self", ".", "parent", ".", "step_kw_subcategory", ".", "selected_subcategory", "(", ")", "layer_mode_question", "=", "(", "layer_mode_raster_question", "if", "is_raster_layer", "(", "self", ".", "parent", ".", "layer", ")", "else", "layer_mode_vector_question", ")", "self", ".", "lblDescribeLayerMode", ".", "setText", "(", "''", ")", "self", ".", "lstLayerModes", ".", "clear", "(", ")", "layer_modes", "=", "get_layer_modes", "(", "subcategory", "[", "'key'", "]", ")", "if", "is_raster_layer", "(", "self", ".", "parent", ".", "layer", ")", ":", "layer_mode_question", "=", "layer_mode_raster_question", "else", ":", "if", "len", "(", "layer_modes", ")", "==", "2", ":", "layer_mode_question", "=", "layer_mode_vector_question", "elif", "len", "(", "layer_modes", ")", "==", "1", ":", "if", "layer_modes", "[", "0", "]", "[", "'key'", "]", "==", "'classified'", ":", "layer_mode_question", "=", "layer_mode_vector_classified_confirm", "elif", "layer_modes", "[", "0", "]", "[", "'key'", "]", "==", "'continuous'", ":", "layer_mode_question", "=", "layer_mode_vector_continuous_confirm", "else", ":", "layer_mode_question", "=", "layer_mode_vector_question", "self", ".", "lblSelectLayerMode", ".", "setText", "(", "layer_mode_question", "%", "(", "subcategory", "[", "'name'", "]", ",", "purpose", "[", "'name'", "]", ")", ")", "for", "layer_mode", "in", "layer_modes", ":", "item", "=", "QListWidgetItem", "(", "layer_mode", "[", "'name'", "]", ",", "self", ".", "lstLayerModes", ")", "item", ".", "setData", "(", "QtCore", ".", "Qt", ".", "UserRole", ",", "layer_mode", "[", "'key'", "]", ")", "self", ".", "lstLayerModes", ".", "addItem", "(", "item", ")", "# Set value to existing keyword or default value", "layer_mode_keys", "=", "[", "m", "[", "'key'", "]", "for", "m", "in", "layer_modes", "]", "layer_mode_keyword", "=", "self", ".", "parent", ".", "get_existing_keyword", "(", "'layer_mode'", ")", "if", "layer_mode_keyword", "in", "layer_mode_keys", ":", "index", "=", "layer_mode_keys", ".", "index", "(", "layer_mode_keyword", ")", "elif", "layer_mode_continuous", "[", "'key'", "]", "in", "layer_mode_keys", ":", "# Set default value", "index", "=", "layer_mode_keys", ".", "index", "(", "layer_mode_continuous", "[", "'key'", "]", ")", "else", ":", "index", "=", "-", "1", "self", ".", "lstLayerModes", ".", "setCurrentRow", "(", "index", ")", "self", ".", "auto_select_one_item", "(", "self", ".", "lstLayerModes", ")" ]
Set widgets on the LayerMode tab.
[ "Set", "widgets", "on", "the", "LayerMode", "tab", "." ]
python
train
Esri/ArcREST
src/arcrest/manageags/_system.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_system.py#L114-L148
def registerDirectory(self,name,physicalPath,directoryType,cleanupMode, maxFileAge,description): """ Registers a new server directory. While registering the server directory, you can also specify the directory's cleanup parameters. You can also register a directory by using its JSON representation as a value of the directory parameter. Inputs: name - The name of the server directory. physicalPath - The absolute physical path of the server directory. directoryType - The type of server directory. cleanupMode - Defines if files in the server directory needs to be cleaned up. Default: NONE maxFileAge - Defines how long a file in the directory needs to be kept before it is deleted (in minutes). description - An optional description for the server directory. """ url = self._url + "/directories/register" params = { "f" : "json", "name" : name, "physicalPath" : physicalPath, "directoryType" : directoryType, "cleanupMode" : cleanupMode, "maxFileAge" : maxFileAge, "description" : description } res = self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) return res
[ "def", "registerDirectory", "(", "self", ",", "name", ",", "physicalPath", ",", "directoryType", ",", "cleanupMode", ",", "maxFileAge", ",", "description", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/directories/register\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"name\"", ":", "name", ",", "\"physicalPath\"", ":", "physicalPath", ",", "\"directoryType\"", ":", "directoryType", ",", "\"cleanupMode\"", ":", "cleanupMode", ",", "\"maxFileAge\"", ":", "maxFileAge", ",", "\"description\"", ":", "description", "}", "res", "=", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "return", "res" ]
Registers a new server directory. While registering the server directory, you can also specify the directory's cleanup parameters. You can also register a directory by using its JSON representation as a value of the directory parameter. Inputs: name - The name of the server directory. physicalPath - The absolute physical path of the server directory. directoryType - The type of server directory. cleanupMode - Defines if files in the server directory needs to be cleaned up. Default: NONE maxFileAge - Defines how long a file in the directory needs to be kept before it is deleted (in minutes). description - An optional description for the server directory.
[ "Registers", "a", "new", "server", "directory", ".", "While", "registering", "the", "server", "directory", "you", "can", "also", "specify", "the", "directory", "s", "cleanup", "parameters", ".", "You", "can", "also", "register", "a", "directory", "by", "using", "its", "JSON", "representation", "as", "a", "value", "of", "the", "directory", "parameter", ".", "Inputs", ":", "name", "-", "The", "name", "of", "the", "server", "directory", ".", "physicalPath", "-", "The", "absolute", "physical", "path", "of", "the", "server", "directory", ".", "directoryType", "-", "The", "type", "of", "server", "directory", ".", "cleanupMode", "-", "Defines", "if", "files", "in", "the", "server", "directory", "needs", "to", "be", "cleaned", "up", ".", "Default", ":", "NONE", "maxFileAge", "-", "Defines", "how", "long", "a", "file", "in", "the", "directory", "needs", "to", "be", "kept", "before", "it", "is", "deleted", "(", "in", "minutes", ")", ".", "description", "-", "An", "optional", "description", "for", "the", "server", "directory", "." ]
python
train
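A usage sketch for the registerDirectory call above. The `ags` admin object, the Windows path, and the "OUTPUT"/"TIME_ELAPSED_SINCE_LAST_MODIFIED" values are illustrative assumptions, not values taken from this record.

# Hypothetical call against an already-connected ArcGIS Server admin object `ags`;
# every argument value below is an illustrative assumption.
res = ags.system.registerDirectory(
    name="arcgisoutput2",
    physicalPath=r"C:\arcgisserver\directories\arcgisoutput2",
    directoryType="OUTPUT",
    cleanupMode="TIME_ELAPSED_SINCE_LAST_MODIFIED",
    maxFileAge=1440,  # minutes a file may age before it becomes eligible for cleanup
    description="Extra output directory")
print(res)  # JSON response from the /directories/register endpoint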
sosy-lab/benchexec
benchexec/runexecutor.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/runexecutor.py#L587-L610
def _setup_environment(self, environments): """Return map with desired environment variables for run.""" # If keepEnv is set or sudo is used, start from a fresh environment, # otherwise with the current one. # keepEnv specifies variables to copy from the current environment, # newEnv specifies variables to set to a new value, # additionalEnv specifies variables where some value should be appended, and # clearEnv specifies variables to delete. if self._user is not None or environments.get("keepEnv", None) is not None: run_environment = {} else: run_environment = os.environ.copy() for key, value in environments.get("keepEnv", {}).items(): if key in os.environ: run_environment[key] = os.environ[key] for key, value in environments.get("newEnv", {}).items(): run_environment[key] = value for key, value in environments.get("additionalEnv", {}).items(): run_environment[key] = os.environ.get(key, "") + value for key in environments.get("clearEnv", {}).items(): run_environment.pop(key, None) logging.debug("Using additional environment %s.", environments) return run_environment
[ "def", "_setup_environment", "(", "self", ",", "environments", ")", ":", "# If keepEnv is set or sudo is used, start from a fresh environment,", "# otherwise with the current one.", "# keepEnv specifies variables to copy from the current environment,", "# newEnv specifies variables to set to a new value,", "# additionalEnv specifies variables where some value should be appended, and", "# clearEnv specifies variables to delete.", "if", "self", ".", "_user", "is", "not", "None", "or", "environments", ".", "get", "(", "\"keepEnv\"", ",", "None", ")", "is", "not", "None", ":", "run_environment", "=", "{", "}", "else", ":", "run_environment", "=", "os", ".", "environ", ".", "copy", "(", ")", "for", "key", ",", "value", "in", "environments", ".", "get", "(", "\"keepEnv\"", ",", "{", "}", ")", ".", "items", "(", ")", ":", "if", "key", "in", "os", ".", "environ", ":", "run_environment", "[", "key", "]", "=", "os", ".", "environ", "[", "key", "]", "for", "key", ",", "value", "in", "environments", ".", "get", "(", "\"newEnv\"", ",", "{", "}", ")", ".", "items", "(", ")", ":", "run_environment", "[", "key", "]", "=", "value", "for", "key", ",", "value", "in", "environments", ".", "get", "(", "\"additionalEnv\"", ",", "{", "}", ")", ".", "items", "(", ")", ":", "run_environment", "[", "key", "]", "=", "os", ".", "environ", ".", "get", "(", "key", ",", "\"\"", ")", "+", "value", "for", "key", "in", "environments", ".", "get", "(", "\"clearEnv\"", ",", "{", "}", ")", ".", "items", "(", ")", ":", "run_environment", ".", "pop", "(", "key", ",", "None", ")", "logging", ".", "debug", "(", "\"Using additional environment %s.\"", ",", "environments", ")", "return", "run_environment" ]
Return map with desired environment variables for run.
[ "Return", "map", "with", "desired", "environment", "variables", "for", "run", "." ]
python
train
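To make the four environment keys handled above concrete, a small illustrative `environments` mapping; the variable names and values are made up, and the call at the end is hypothetical since _setup_environment is an internal method.

environments = {
    "keepEnv": {"HOME": None},                    # presence of this key means: start fresh, but copy HOME
    "newEnv": {"LANG": "C"},                      # set to a fixed value
    "additionalEnv": {"PATH": ":/opt/tool/bin"},  # appended to the current value
    "clearEnv": {"TMPDIR": None},                 # removed from the run environment
}
# run_env = executor._setup_environment(environments)   # hypothetical internal call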
gem/oq-engine
openquake/hazardlib/geo/surface/complex_fault.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/surface/complex_fault.py#L96-L111
def get_dip(self): """ Return the fault dip as the average dip over the mesh. The average dip is defined as the weighted mean inclination of all the mesh cells. See :meth:`openquake.hazardlib.geo.mesh.RectangularMesh.get_mean_inclination_and_azimuth` :returns: The average dip, in decimal degrees. """ # uses the same approach as in simple fault surface if self.dip is None: mesh = self.mesh self.dip, self.strike = mesh.get_mean_inclination_and_azimuth() return self.dip
[ "def", "get_dip", "(", "self", ")", ":", "# uses the same approach as in simple fault surface", "if", "self", ".", "dip", "is", "None", ":", "mesh", "=", "self", ".", "mesh", "self", ".", "dip", ",", "self", ".", "strike", "=", "mesh", ".", "get_mean_inclination_and_azimuth", "(", ")", "return", "self", ".", "dip" ]
Return the fault dip as the average dip over the mesh. The average dip is defined as the weighted mean inclination of all the mesh cells. See :meth:`openquake.hazardlib.geo.mesh.RectangularMesh.get_mean_inclination_and_azimuth` :returns: The average dip, in decimal degrees.
[ "Return", "the", "fault", "dip", "as", "the", "average", "dip", "over", "the", "mesh", "." ]
python
train
jmwri/simplejwt
simplejwt/jwt.py
https://github.com/jmwri/simplejwt/blob/0828eaace0846918d2d202f5a60167a003e88b71/simplejwt/jwt.py#L397-L438
def decode(secret: Union[str, bytes], token: Union[str, bytes], alg: str = default_alg) -> Tuple[dict, dict]: """ Decodes the given token's header and payload and validates the signature. :param secret: The secret used to decode the token. Must match the secret used when creating the token. :type secret: Union[str, bytes] :param token: The token to decode. :type token: Union[str, bytes] :param alg: The algorithm used to decode the token. Must match the algorithm used when creating the token. :type alg: str :return: The decoded header and payload. :rtype: Tuple[dict, dict] """ secret = util.to_bytes(secret) token = util.to_bytes(token) pre_signature, signature_segment = token.rsplit(b'.', 1) header_b64, payload_b64 = pre_signature.split(b'.') try: header_json = util.b64_decode(header_b64) header = json.loads(util.from_bytes(header_json)) except (json.decoder.JSONDecodeError, UnicodeDecodeError, ValueError): raise InvalidHeaderError('Invalid header') try: payload_json = util.b64_decode(payload_b64) payload = json.loads(util.from_bytes(payload_json)) except (json.decoder.JSONDecodeError, UnicodeDecodeError, ValueError): raise InvalidPayloadError('Invalid payload') if not isinstance(header, dict): raise InvalidHeaderError('Invalid header: {}'.format(header)) if not isinstance(payload, dict): raise InvalidPayloadError('Invalid payload: {}'.format(payload)) signature = util.b64_decode(signature_segment) calculated_signature = _hash(secret, pre_signature, alg) if not compare_signature(signature, calculated_signature): raise InvalidSignatureError('Invalid signature') return header, payload
[ "def", "decode", "(", "secret", ":", "Union", "[", "str", ",", "bytes", "]", ",", "token", ":", "Union", "[", "str", ",", "bytes", "]", ",", "alg", ":", "str", "=", "default_alg", ")", "->", "Tuple", "[", "dict", ",", "dict", "]", ":", "secret", "=", "util", ".", "to_bytes", "(", "secret", ")", "token", "=", "util", ".", "to_bytes", "(", "token", ")", "pre_signature", ",", "signature_segment", "=", "token", ".", "rsplit", "(", "b'.'", ",", "1", ")", "header_b64", ",", "payload_b64", "=", "pre_signature", ".", "split", "(", "b'.'", ")", "try", ":", "header_json", "=", "util", ".", "b64_decode", "(", "header_b64", ")", "header", "=", "json", ".", "loads", "(", "util", ".", "from_bytes", "(", "header_json", ")", ")", "except", "(", "json", ".", "decoder", ".", "JSONDecodeError", ",", "UnicodeDecodeError", ",", "ValueError", ")", ":", "raise", "InvalidHeaderError", "(", "'Invalid header'", ")", "try", ":", "payload_json", "=", "util", ".", "b64_decode", "(", "payload_b64", ")", "payload", "=", "json", ".", "loads", "(", "util", ".", "from_bytes", "(", "payload_json", ")", ")", "except", "(", "json", ".", "decoder", ".", "JSONDecodeError", ",", "UnicodeDecodeError", ",", "ValueError", ")", ":", "raise", "InvalidPayloadError", "(", "'Invalid payload'", ")", "if", "not", "isinstance", "(", "header", ",", "dict", ")", ":", "raise", "InvalidHeaderError", "(", "'Invalid header: {}'", ".", "format", "(", "header", ")", ")", "if", "not", "isinstance", "(", "payload", ",", "dict", ")", ":", "raise", "InvalidPayloadError", "(", "'Invalid payload: {}'", ".", "format", "(", "payload", ")", ")", "signature", "=", "util", ".", "b64_decode", "(", "signature_segment", ")", "calculated_signature", "=", "_hash", "(", "secret", ",", "pre_signature", ",", "alg", ")", "if", "not", "compare_signature", "(", "signature", ",", "calculated_signature", ")", ":", "raise", "InvalidSignatureError", "(", "'Invalid signature'", ")", "return", "header", ",", "payload" ]
Decodes the given token's header and payload and validates the signature. :param secret: The secret used to decode the token. Must match the secret used when creating the token. :type secret: Union[str, bytes] :param token: The token to decode. :type token: Union[str, bytes] :param alg: The algorithm used to decode the token. Must match the algorithm used when creating the token. :type alg: str :return: The decoded header and payload. :rtype: Tuple[dict, dict]
[ "Decodes", "the", "given", "token", "s", "header", "and", "payload", "and", "validates", "the", "signature", "." ]
python
valid
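A hedged usage sketch for decode(); the token literal is a placeholder, and the exception class is assumed to live in the same module because the function body raises it unqualified.

from simplejwt import jwt

token = "<header>.<payload>.<signature>"   # placeholder, not a real JWT
try:
    header, payload = jwt.decode("s3cret", token)   # the module's default_alg is used when alg is omitted
    print(payload)
except jwt.InvalidSignatureError:
    print("signature does not match this secret")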
chaoss/grimoirelab-sortinghat
sortinghat/cmd/unify.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/unify.py#L315-L327
def save_matches(self, matches): """Save matches of a failed execution to the log. :param matches: a list of matches in JSON format """ if not os.path.exists(os.path.dirname(self.location())): os.makedirs(os.path.dirname(self.location())) with open(self.location(), "w+") as f: matches = [m for m in matches if not m['processed']] for m in matches: match_obj = json.dumps(m) f.write(match_obj + "\n")
[ "def", "save_matches", "(", "self", ",", "matches", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "self", ".", "location", "(", ")", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "self", ".", "location", "(", ")", ")", ")", "with", "open", "(", "self", ".", "location", "(", ")", ",", "\"w+\"", ")", "as", "f", ":", "matches", "=", "[", "m", "for", "m", "in", "matches", "if", "not", "m", "[", "'processed'", "]", "]", "for", "m", "in", "matches", ":", "match_obj", "=", "json", ".", "dumps", "(", "m", ")", "f", ".", "write", "(", "match_obj", "+", "\"\\n\"", ")" ]
Save matches of a failed execution to the log. :param matches: a list of matches in JSON format
[ "Save", "matches", "of", "a", "failed", "execution", "to", "the", "log", "." ]
python
train
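A sketch of the input and resulting log content for save_matches(); only the 'processed' key is actually read by the method, the other fields are invented.

matches = [
    {"uuids": ["a1", "b2"], "processed": True},    # filtered out, not written
    {"uuids": ["c3", "d4"], "processed": False},   # written as one JSON object per line
]
# log.save_matches(matches)   # hypothetical log object exposing the method above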
mweb/appconfig
appconfig/appconfig.py
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L152-L166
def load(self, filename): ''' Load the given config file. @param filename: the filename including the path to load. ''' if not os.path.exists(filename): #print 'Could not load config file [%s]' % (filename) raise AppConfigValueException('Could not load config file {0}'. format(filename)) cfl = open(filename, 'r') if PY2: self.readfp(cfl) else: self.read_file(cfl) cfl.close()
[ "def", "load", "(", "self", ",", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "#print 'Could not load config file [%s]' % (filename)", "raise", "AppConfigValueException", "(", "'Could not load config file {0}'", ".", "format", "(", "filename", ")", ")", "cfl", "=", "open", "(", "filename", ",", "'r'", ")", "if", "PY2", ":", "self", ".", "readfp", "(", "cfl", ")", "else", ":", "self", ".", "read_file", "(", "cfl", ")", "cfl", ".", "close", "(", ")" ]
Load the given config file. @param filename: the filename including the path to load.
[ "Load", "the", "given", "config", "file", "." ]
python
train
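A usage sketch for load(); the AppConfig class name and the ini path are assumptions inferred from the module path, not taken from this record.

cfg = AppConfig()                              # assumed class name
try:
    cfg.load("/etc/myapp/settings.ini")        # any existing ConfigParser-style file
except AppConfigValueException as err:         # raised when the file does not exist
    print(err)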
Staffjoy/client_python
staffjoy/resource.py
https://github.com/Staffjoy/client_python/blob/e8811b0c06651a15e691c96cbfd41e7da4f7f213/staffjoy/resource.py#L106-L110
def _url(self): """Get the URL for the resource""" if self.ID_NAME not in self.route.keys() and "id" in self.data.keys(): self.route[self.ID_NAME] = self.data["id"] return self.config.BASE + self.PATH.format(**self.route)
[ "def", "_url", "(", "self", ")", ":", "if", "self", ".", "ID_NAME", "not", "in", "self", ".", "route", ".", "keys", "(", ")", "and", "\"id\"", "in", "self", ".", "data", ".", "keys", "(", ")", ":", "self", ".", "route", "[", "self", ".", "ID_NAME", "]", "=", "self", ".", "data", "[", "\"id\"", "]", "return", "self", ".", "config", ".", "BASE", "+", "self", ".", "PATH", ".", "format", "(", "*", "*", "self", ".", "route", ")" ]
Get the URL for the resource
[ "Get", "the", "URL", "for", "the", "resource" ]
python
train
mgoral/subconvert
src/subconvert/gui/SubtitleCommands.py
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/gui/SubtitleCommands.py#L60-L66
def setup(self): """When subclassing remember to call SubtitleChangeCommand::setup() to perform generic checks.""" if not isinstance(self.filePath, str): raise TypeError("File path is not a string!") if self.controller is None: raise ValueError("Command controller hasn't been specified!")
[ "def", "setup", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "filePath", ",", "str", ")", ":", "raise", "TypeError", "(", "\"File path is not a string!\"", ")", "if", "self", ".", "controller", "is", "None", ":", "raise", "ValueError", "(", "\"Command controller hasn't been specified!\"", ")" ]
When subclassing remember to call SubtitleChangeCommand::setup() to perform generic checks.
[ "When", "subclassing", "remember", "to", "call", "SubtitleChangeCommand", "::", "setup", "()", "to", "perform", "generic", "checks", "." ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/flask/app.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/app.py#L1332-L1350
def trap_http_exception(self, e): """Checks if an HTTP exception should be trapped or not. By default this will return `False` for all exceptions except for a bad request key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`. It also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`. This is called for all HTTP exceptions raised by a view function. If it returns `True` for any exception the error handler for this exception is not called and it shows up as regular exception in the traceback. This is helpful for debugging implicitly raised HTTP exceptions. .. versionadded:: 0.8 """ if self.config['TRAP_HTTP_EXCEPTIONS']: return True if self.config['TRAP_BAD_REQUEST_ERRORS']: return isinstance(e, BadRequest) return False
[ "def", "trap_http_exception", "(", "self", ",", "e", ")", ":", "if", "self", ".", "config", "[", "'TRAP_HTTP_EXCEPTIONS'", "]", ":", "return", "True", "if", "self", ".", "config", "[", "'TRAP_BAD_REQUEST_ERRORS'", "]", ":", "return", "isinstance", "(", "e", ",", "BadRequest", ")", "return", "False" ]
Checks if an HTTP exception should be trapped or not. By default this will return `False` for all exceptions except for a bad request key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`. It also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`. This is called for all HTTP exceptions raised by a view function. If it returns `True` for any exception the error handler for this exception is not called and it shows up as regular exception in the traceback. This is helpful for debugging implicitly raised HTTP exceptions. .. versionadded:: 0.8
[ "Checks", "if", "an", "HTTP", "exception", "should", "be", "trapped", "or", "not", ".", "By", "default", "this", "will", "return", "False", "for", "all", "exceptions", "except", "for", "a", "bad", "request", "key", "error", "if", "TRAP_BAD_REQUEST_ERRORS", "is", "set", "to", "True", ".", "It", "also", "returns", "True", "if", "TRAP_HTTP_EXCEPTIONS", "is", "set", "to", "True", "." ]
python
test
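The behaviour above is controlled entirely by two standard Flask config flags; a short sketch of opting in while debugging (the app itself is a throwaway example).

from flask import Flask

app = Flask(__name__)
# Let HTTP exceptions and bad-request key errors propagate as real tracebacks
# instead of being converted into error responses -- useful while debugging.
app.config["TRAP_HTTP_EXCEPTIONS"] = True
app.config["TRAP_BAD_REQUEST_ERRORS"] = True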
apple/turicreate
deps/src/boost_1_68_0/libs/predef/tools/ci/build_log.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/predef/tools/ci/build_log.py#L363-L378
def print_action(self, test_succeed, action): ''' Print the detailed info of failed or always print tests. ''' #self.info_print(">>> {0}",action.keys()) if not test_succeed or action['info']['always_show_run_output']: output = action['output'].strip() if output != "": p = self.fail_print if action['result'] == 'fail' else self.p_print self.info_print("") self.info_print("({0}) {1}",action['info']['name'],action['info']['path']) p("") p("{0}",action['command'].strip()) p("") for line in output.splitlines(): p("{0}",line.encode('utf-8'))
[ "def", "print_action", "(", "self", ",", "test_succeed", ",", "action", ")", ":", "#self.info_print(\">>> {0}\",action.keys())", "if", "not", "test_succeed", "or", "action", "[", "'info'", "]", "[", "'always_show_run_output'", "]", ":", "output", "=", "action", "[", "'output'", "]", ".", "strip", "(", ")", "if", "output", "!=", "\"\"", ":", "p", "=", "self", ".", "fail_print", "if", "action", "[", "'result'", "]", "==", "'fail'", "else", "self", ".", "p_print", "self", ".", "info_print", "(", "\"\"", ")", "self", ".", "info_print", "(", "\"({0}) {1}\"", ",", "action", "[", "'info'", "]", "[", "'name'", "]", ",", "action", "[", "'info'", "]", "[", "'path'", "]", ")", "p", "(", "\"\"", ")", "p", "(", "\"{0}\"", ",", "action", "[", "'command'", "]", ".", "strip", "(", ")", ")", "p", "(", "\"\"", ")", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "p", "(", "\"{0}\"", ",", "line", ".", "encode", "(", "'utf-8'", ")", ")" ]
Print the detailed info of failed or always print tests.
[ "Print", "the", "detailed", "info", "of", "failed", "or", "always", "print", "tests", "." ]
python
train
berkeley-cocosci/Wallace
wallace/custom.py
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L696-L733
def get_info(node_id, info_id): """Get a specific info. Both the node and info id must be specified in the url. """ exp = experiment(session) # check the node exists node = models.Node.query.get(node_id) if node is None: return error_response(error_type="/info, node does not exist") # execute the experiment method: info = models.Info.query.get(info_id) if info is None: return error_response(error_type="/info GET, info does not exist", participant=node.participant) elif (info.origin_id != node.id and info.id not in [t.info_id for t in node.transmissions(direction="incoming", status="received")]): return error_response(error_type="/info GET, forbidden info", status=403, participant=node.participant) try: # ping the experiment exp.info_get_request(node=node, infos=info) session.commit() except: return error_response(error_type="/info GET server error", status=403, participant=node.participant) # return the data return success_response(field="info", data=info.__json__(), request_type="info get")
[ "def", "get_info", "(", "node_id", ",", "info_id", ")", ":", "exp", "=", "experiment", "(", "session", ")", "# check the node exists", "node", "=", "models", ".", "Node", ".", "query", ".", "get", "(", "node_id", ")", "if", "node", "is", "None", ":", "return", "error_response", "(", "error_type", "=", "\"/info, node does not exist\"", ")", "# execute the experiment method:", "info", "=", "models", ".", "Info", ".", "query", ".", "get", "(", "info_id", ")", "if", "info", "is", "None", ":", "return", "error_response", "(", "error_type", "=", "\"/info GET, info does not exist\"", ",", "participant", "=", "node", ".", "participant", ")", "elif", "(", "info", ".", "origin_id", "!=", "node", ".", "id", "and", "info", ".", "id", "not", "in", "[", "t", ".", "info_id", "for", "t", "in", "node", ".", "transmissions", "(", "direction", "=", "\"incoming\"", ",", "status", "=", "\"received\"", ")", "]", ")", ":", "return", "error_response", "(", "error_type", "=", "\"/info GET, forbidden info\"", ",", "status", "=", "403", ",", "participant", "=", "node", ".", "participant", ")", "try", ":", "# ping the experiment", "exp", ".", "info_get_request", "(", "node", "=", "node", ",", "infos", "=", "info", ")", "session", ".", "commit", "(", ")", "except", ":", "return", "error_response", "(", "error_type", "=", "\"/info GET server error\"", ",", "status", "=", "403", ",", "participant", "=", "node", ".", "participant", ")", "# return the data", "return", "success_response", "(", "field", "=", "\"info\"", ",", "data", "=", "info", ".", "__json__", "(", ")", ",", "request_type", "=", "\"info get\"", ")" ]
Get a specific info. Both the node and info id must be specified in the url.
[ "Get", "a", "specific", "info", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/mtf_transformer2.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer2.py#L454-L469
def mtf_unitransformer_base(): """Hyperparameters for single-stack Transformer.""" hparams = mtf_transformer2_base() hparams.add_hparam("autoregressive", True) # HYPERPARAMETERS FOR THE SINGLE LAYER STACK hparams.add_hparam("layers", ["self_att", "drd"] * 6) # number of heads in multihead attention hparams.add_hparam("num_heads", 8) # default of 0 for standard transformer behavior # 1 means a single set of keys and values that are read by all query heads hparams.add_hparam("num_memory_heads", 0) # share attention keys and values hparams.add_hparam("shared_kv", False) # if nonzero then use local attention hparams.add_hparam("local_attention_radius", 128) return hparams
[ "def", "mtf_unitransformer_base", "(", ")", ":", "hparams", "=", "mtf_transformer2_base", "(", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive\"", ",", "True", ")", "# HYPERPARAMETERS FOR THE SINGLE LAYER STACK", "hparams", ".", "add_hparam", "(", "\"layers\"", ",", "[", "\"self_att\"", ",", "\"drd\"", "]", "*", "6", ")", "# number of heads in multihead attention", "hparams", ".", "add_hparam", "(", "\"num_heads\"", ",", "8", ")", "# default of 0 for standard transformer behavior", "# 1 means a single set of keys and values that are read by all query heads", "hparams", ".", "add_hparam", "(", "\"num_memory_heads\"", ",", "0", ")", "# share attention keys and values", "hparams", ".", "add_hparam", "(", "\"shared_kv\"", ",", "False", ")", "# if nonzero then use local attention", "hparams", ".", "add_hparam", "(", "\"local_attention_radius\"", ",", "128", ")", "return", "hparams" ]
Hyperparameters for single-stack Transformer.
[ "Hyperparameters", "for", "single", "-", "stack", "Transformer", "." ]
python
train
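A sketch of how such a registered hparams set is usually adjusted; overriding attributes after calling the base function is common tensor2tensor practice, but the specific values below are arbitrary.

hparams = mtf_unitransformer_base()
hparams.num_heads = 16                       # illustrative override
hparams.local_attention_radius = 256
hparams.layers = ["self_att", "drd"] * 8     # a deeper single-stack configuration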
gem/oq-engine
openquake/commonlib/logictree.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/logictree.py#L1022-L1090
def validate_filters(self, branchset_node, uncertainty_type, filters): """ See superclass' method for description and signature specification. Checks that the following conditions are met: * "sourceModel" uncertainties can not have filters. * Absolute uncertainties must have only one filter -- "applyToSources", with only one source id. * All other uncertainty types can have either no or one filter. * Filter "applyToSources" must mention only source ids that exist in source models. * Filter "applyToTectonicRegionType" must mention only tectonic region types that exist in source models. * Filter "applyToSourceType" must mention only source types that exist in source models. """ if uncertainty_type == 'sourceModel' and filters: raise LogicTreeError( branchset_node, self.filename, 'filters are not allowed on source model uncertainty') if len(filters) > 1: raise LogicTreeError( branchset_node, self.filename, "only one filter is allowed per branchset") if 'applyToTectonicRegionType' in filters: if not filters['applyToTectonicRegionType'] \ in self.tectonic_region_types: raise LogicTreeError( branchset_node, self.filename, "source models don't define sources of tectonic region " "type '%s'" % filters['applyToTectonicRegionType']) if uncertainty_type in ('abGRAbsolute', 'maxMagGRAbsolute', 'simpleFaultGeometryAbsolute', 'complexFaultGeometryAbsolute'): if not filters or not list(filters) == ['applyToSources'] \ or not len(filters['applyToSources'].split()) == 1: raise LogicTreeError( branchset_node, self.filename, "uncertainty of type '%s' must define 'applyToSources' " "with only one source id" % uncertainty_type) if uncertainty_type in ('simpleFaultDipRelative', 'simpleFaultDipAbsolute'): if not filters or (not ('applyToSources' in filters.keys()) and not ('applyToSourceType' in filters.keys())): raise LogicTreeError( branchset_node, self.filename, "uncertainty of type '%s' must define either" "'applyToSources' or 'applyToSourceType'" % uncertainty_type) if 'applyToSourceType' in filters: if not filters['applyToSourceType'] in self.source_types: raise LogicTreeError( branchset_node, self.filename, "source models don't define sources of type '%s'" % filters['applyToSourceType']) if 'applyToSources' in filters: for source_id in filters['applyToSources'].split(): for source_ids in self.source_ids.values(): if source_id not in source_ids: raise LogicTreeError( branchset_node, self.filename, "source with id '%s' is not defined in source " "models" % source_id)
[ "def", "validate_filters", "(", "self", ",", "branchset_node", ",", "uncertainty_type", ",", "filters", ")", ":", "if", "uncertainty_type", "==", "'sourceModel'", "and", "filters", ":", "raise", "LogicTreeError", "(", "branchset_node", ",", "self", ".", "filename", ",", "'filters are not allowed on source model uncertainty'", ")", "if", "len", "(", "filters", ")", ">", "1", ":", "raise", "LogicTreeError", "(", "branchset_node", ",", "self", ".", "filename", ",", "\"only one filter is allowed per branchset\"", ")", "if", "'applyToTectonicRegionType'", "in", "filters", ":", "if", "not", "filters", "[", "'applyToTectonicRegionType'", "]", "in", "self", ".", "tectonic_region_types", ":", "raise", "LogicTreeError", "(", "branchset_node", ",", "self", ".", "filename", ",", "\"source models don't define sources of tectonic region \"", "\"type '%s'\"", "%", "filters", "[", "'applyToTectonicRegionType'", "]", ")", "if", "uncertainty_type", "in", "(", "'abGRAbsolute'", ",", "'maxMagGRAbsolute'", ",", "'simpleFaultGeometryAbsolute'", ",", "'complexFaultGeometryAbsolute'", ")", ":", "if", "not", "filters", "or", "not", "list", "(", "filters", ")", "==", "[", "'applyToSources'", "]", "or", "not", "len", "(", "filters", "[", "'applyToSources'", "]", ".", "split", "(", ")", ")", "==", "1", ":", "raise", "LogicTreeError", "(", "branchset_node", ",", "self", ".", "filename", ",", "\"uncertainty of type '%s' must define 'applyToSources' \"", "\"with only one source id\"", "%", "uncertainty_type", ")", "if", "uncertainty_type", "in", "(", "'simpleFaultDipRelative'", ",", "'simpleFaultDipAbsolute'", ")", ":", "if", "not", "filters", "or", "(", "not", "(", "'applyToSources'", "in", "filters", ".", "keys", "(", ")", ")", "and", "not", "(", "'applyToSourceType'", "in", "filters", ".", "keys", "(", ")", ")", ")", ":", "raise", "LogicTreeError", "(", "branchset_node", ",", "self", ".", "filename", ",", "\"uncertainty of type '%s' must define either\"", "\"'applyToSources' or 'applyToSourceType'\"", "%", "uncertainty_type", ")", "if", "'applyToSourceType'", "in", "filters", ":", "if", "not", "filters", "[", "'applyToSourceType'", "]", "in", "self", ".", "source_types", ":", "raise", "LogicTreeError", "(", "branchset_node", ",", "self", ".", "filename", ",", "\"source models don't define sources of type '%s'\"", "%", "filters", "[", "'applyToSourceType'", "]", ")", "if", "'applyToSources'", "in", "filters", ":", "for", "source_id", "in", "filters", "[", "'applyToSources'", "]", ".", "split", "(", ")", ":", "for", "source_ids", "in", "self", ".", "source_ids", ".", "values", "(", ")", ":", "if", "source_id", "not", "in", "source_ids", ":", "raise", "LogicTreeError", "(", "branchset_node", ",", "self", ".", "filename", ",", "\"source with id '%s' is not defined in source \"", "\"models\"", "%", "source_id", ")" ]
See superclass' method for description and signature specification. Checks that the following conditions are met: * "sourceModel" uncertainties can not have filters. * Absolute uncertainties must have only one filter -- "applyToSources", with only one source id. * All other uncertainty types can have either no or one filter. * Filter "applyToSources" must mention only source ids that exist in source models. * Filter "applyToTectonicRegionType" must mention only tectonic region types that exist in source models. * Filter "applyToSourceType" must mention only source types that exist in source models.
[ "See", "superclass", "method", "for", "description", "and", "signature", "specification", "." ]
python
train
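To make the validation rules above concrete, a few hypothetical (uncertainty_type, filters) pairs; the source ids, the bGRRelative type and the region name are invented for illustration.

would_pass = [
    ("maxMagGRAbsolute", {"applyToSources": "src_01"}),                     # absolute -> exactly one source id
    ("bGRRelative", {"applyToTectonicRegionType": "Active Shallow Crust"}),
    ("sourceModel", {}),                                                    # no filters allowed at all
]
would_fail = [
    ("sourceModel", {"applyToSources": "src_01"}),                          # filters forbidden here
    ("maxMagGRAbsolute", {"applyToSources": "src_01 src_02"}),              # more than one source id
]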
csparpa/pyowm
pyowm/utils/geo.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/utils/geo.py#L355-L373
def build(cls, the_dict): """ Builds a `pyowm.utils.geo.Geometry` subtype based on the geoJSON geometry type specified on the input dictionary :param the_dict: a geoJSON compliant dict :return: a `pyowm.utils.geo.Geometry` subtype instance :raises `ValueError` if unable to the geometry type cannot be recognized """ assert isinstance(the_dict, dict), 'Geometry must be a dict' geom_type = the_dict.get('type', None) if geom_type == 'Point': return Point.from_dict(the_dict) elif geom_type == 'MultiPoint': return MultiPoint.from_dict(the_dict) elif geom_type == 'Polygon': return Polygon.from_dict(the_dict) elif geom_type == 'MultiPolygon': return MultiPolygon.from_dict(the_dict) else: raise ValueError('Unable to build a GeoType object: unrecognized geometry type')
[ "def", "build", "(", "cls", ",", "the_dict", ")", ":", "assert", "isinstance", "(", "the_dict", ",", "dict", ")", ",", "'Geometry must be a dict'", "geom_type", "=", "the_dict", ".", "get", "(", "'type'", ",", "None", ")", "if", "geom_type", "==", "'Point'", ":", "return", "Point", ".", "from_dict", "(", "the_dict", ")", "elif", "geom_type", "==", "'MultiPoint'", ":", "return", "MultiPoint", ".", "from_dict", "(", "the_dict", ")", "elif", "geom_type", "==", "'Polygon'", ":", "return", "Polygon", ".", "from_dict", "(", "the_dict", ")", "elif", "geom_type", "==", "'MultiPolygon'", ":", "return", "MultiPolygon", ".", "from_dict", "(", "the_dict", ")", "else", ":", "raise", "ValueError", "(", "'Unable to build a GeoType object: unrecognized geometry type'", ")" ]
Builds a `pyowm.utils.geo.Geometry` subtype based on the geoJSON geometry type specified on the input dictionary :param the_dict: a geoJSON compliant dict :return: a `pyowm.utils.geo.Geometry` subtype instance :raises `ValueError` if the geometry type cannot be recognized
[ "Builds", "a", "pyowm", ".", "utils", ".", "geo", ".", "Geometry", "subtype", "based", "on", "the", "geoJSON", "geometry", "type", "specified", "on", "the", "input", "dictionary", ":", "param", "the_dict", ":", "a", "geoJSON", "compliant", "dict", ":", "return", ":", "a", "pyowm", ".", "utils", ".", "geo", ".", "Geometry", "subtype", "instance", ":", "raises", "ValueError", "if", "unable", "to", "the", "geometry", "type", "cannot", "be", "recognized" ]
python
train
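A usage sketch for the build() factory; `GeometryClass` is a stand-in because the enclosing class name is not shown in this record, and the coordinates are invented.

point_dict = {"type": "Point", "coordinates": [-122.43, 37.77]}
geom = GeometryClass.build(point_dict)        # dispatches to Point.from_dict
GeometryClass.build({"type": "LineString", "coordinates": []})   # unrecognized type -> ValueError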
OpenAgInitiative/openag_python
openag/utils.py
https://github.com/OpenAgInitiative/openag_python/blob/f6202340292bbf7185e1a7d4290188c0dacbb8d0/openag/utils.py#L163-L172
def dedupe_by(things, key=None): """ Given an iterator of things and an optional key generation function, return a new iterator of deduped things. Things are compared and de-duped by the key function, which is hash() by default. """ if not key: key = hash index = {key(thing): thing for thing in things} return index.values()
[ "def", "dedupe_by", "(", "things", ",", "key", "=", "None", ")", ":", "if", "not", "key", ":", "key", "=", "hash", "index", "=", "{", "key", "(", "thing", ")", ":", "thing", "for", "thing", "in", "things", "}", "return", "index", ".", "values", "(", ")" ]
Given an iterator of things and an optional key generation function, return a new iterator of deduped things. Things are compared and de-duped by the key function, which is hash() by default.
[ "Given", "an", "iterator", "of", "things", "and", "an", "optional", "key", "generation", "function", "return", "a", "new", "iterator", "of", "deduped", "things", ".", "Things", "are", "compared", "and", "de", "-", "duped", "by", "the", "key", "function", "which", "is", "hash", "()", "by", "default", "." ]
python
train
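A quick usage sketch for dedupe_by(); the records are invented.

records = [
    {"_id": "abc", "rev": 1},
    {"_id": "abc", "rev": 2},     # same key, so the later item replaces the earlier one
    {"_id": "def", "rev": 1},
]
unique = list(dedupe_by(records, key=lambda r: r["_id"]))   # 2 items remain
# with no key argument hash() is used, so the items themselves must be hashable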
gwastro/pycbc
pycbc/tmpltbank/brute_force_methods.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/brute_force_methods.py#L124-L347
def get_mass_distribution(bestMasses, scaleFactor, massRangeParams, metricParams, fUpper, numJumpPoints=100, chirpMassJumpFac=0.0001, etaJumpFac=0.01, spin1zJumpFac=0.01, spin2zJumpFac=0.01): """ Given a set of masses, this function will create a set of points nearby in the mass space and map these to the xi space. Parameters ----------- bestMasses : list Contains [ChirpMass, eta, spin1z, spin2z]. Points will be placed around tjos scaleFactor : float This parameter describes the radius away from bestMasses that points will be placed in. massRangeParams : massRangeParameters instance Instance holding all the details of mass ranges and spin ranges. metricParams : metricParameters instance Structure holding all the options for construction of the metric and the eigenvalues, eigenvectors and covariance matrix needed to manipulate the space. fUpper : float The value of fUpper that was used when obtaining the xi_i coordinates. This lets us know how to rotate potential physical points into the correct xi_i space. This must be a key in metricParams.evals, metricParams.evecs and metricParams.evecsCV (ie. we must know how to do the transformation for the given value of fUpper) numJumpPoints : int, optional (default = 100) The number of points that will be generated every iteration chirpMassJumpFac : float, optional (default=0.0001) The jump points will be chosen with fractional variation in chirpMass up to this multiplied by scaleFactor. etaJumpFac : float, optional (default=0.01) The jump points will be chosen with fractional variation in eta up to this multiplied by scaleFactor. spin1zJumpFac : float, optional (default=0.01) The jump points will be chosen with absolute variation in spin1z up to this multiplied by scaleFactor. spin2zJumpFac : float, optional (default=0.01) The jump points will be chosen with absolute variation in spin2z up to this multiplied by scaleFactor. Returns -------- Totmass : numpy.array Total mass of the resulting points Eta : numpy.array Symmetric mass ratio of the resulting points Spin1z : numpy.array Spin of the heavier body of the resulting points Spin2z : numpy.array Spin of the smaller body of the resulting points Diff : numpy.array Mass1 - Mass2 of the resulting points Mass1 : numpy.array Mass1 (mass of heavier body) of the resulting points Mass2 : numpy.array Mass2 (mass of smaller body) of the resulting points new_xis : list of numpy.array Position of points in the xi coordinates """ # FIXME: It would be better if rejected values could be drawn from the # full possible mass/spin distribution. However speed in this function is # a major factor and must be considered. 
bestChirpmass = bestMasses[0] bestEta = bestMasses[1] bestSpin1z = bestMasses[2] bestSpin2z = bestMasses[3] # Firstly choose a set of values for masses and spins chirpmass = bestChirpmass * (1 - (numpy.random.random(numJumpPoints)-0.5) \ * chirpMassJumpFac * scaleFactor ) etaRange = massRangeParams.maxEta - massRangeParams.minEta currJumpFac = etaJumpFac * scaleFactor if currJumpFac > etaRange: currJumpFac = etaRange eta = bestEta * ( 1 - (numpy.random.random(numJumpPoints) - 0.5) \ * currJumpFac) maxSpinMag = max(massRangeParams.maxNSSpinMag, massRangeParams.maxBHSpinMag) minSpinMag = min(massRangeParams.maxNSSpinMag, massRangeParams.maxBHSpinMag) # Note that these two are cranged by spinxzFac, *not* spinxzFac/spinxz currJumpFac = spin1zJumpFac * scaleFactor if currJumpFac > maxSpinMag: currJumpFac = maxSpinMag # Actually set the new spin trial points if massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag): curr_spin_1z_jump_fac = currJumpFac curr_spin_2z_jump_fac = currJumpFac # Check spins aren't going to be unphysical if currJumpFac > massRangeParams.maxBHSpinMag: curr_spin_1z_jump_fac = massRangeParams.maxBHSpinMag if currJumpFac > massRangeParams.maxNSSpinMag: curr_spin_2z_jump_fac = massRangeParams.maxNSSpinMag spin1z = bestSpin1z + ( (numpy.random.random(numJumpPoints) - 0.5) \ * curr_spin_1z_jump_fac) spin2z = bestSpin2z + ( (numpy.random.random(numJumpPoints) - 0.5) \ * curr_spin_2z_jump_fac) else: # If maxNSSpinMag is very low (0) and maxBHSpinMag is high we can # find it hard to place any points. So mix these when # masses are swapping between the NS and BH. curr_spin_bh_jump_fac = currJumpFac curr_spin_ns_jump_fac = currJumpFac # Check spins aren't going to be unphysical if currJumpFac > massRangeParams.maxBHSpinMag: curr_spin_bh_jump_fac = massRangeParams.maxBHSpinMag if currJumpFac > massRangeParams.maxNSSpinMag: curr_spin_ns_jump_fac = massRangeParams.maxNSSpinMag spin1z = numpy.zeros(numJumpPoints, dtype=float) spin2z = numpy.zeros(numJumpPoints, dtype=float) split_point = int(numJumpPoints/2) # So set the first half to be at least within the BH range and the # second half to be at least within the NS range spin1z[:split_point] = bestSpin1z + \ ( (numpy.random.random(split_point) - 0.5)\ * curr_spin_bh_jump_fac) spin1z[split_point:] = bestSpin1z + \ ( (numpy.random.random(numJumpPoints-split_point) - 0.5)\ * curr_spin_ns_jump_fac) spin2z[:split_point] = bestSpin2z + \ ( (numpy.random.random(split_point) - 0.5)\ * curr_spin_bh_jump_fac) spin2z[split_point:] = bestSpin2z + \ ( (numpy.random.random(numJumpPoints-split_point) - 0.5)\ * curr_spin_ns_jump_fac) # Point[0] is always set to the original point chirpmass[0] = bestChirpmass eta[0] = bestEta spin1z[0] = bestSpin1z spin2z[0] = bestSpin2z # Remove points where eta becomes unphysical eta[eta > massRangeParams.maxEta] = massRangeParams.maxEta if massRangeParams.minEta: eta[eta < massRangeParams.minEta] = massRangeParams.minEta else: eta[eta < 0.0001] = 0.0001 # Total mass, masses and mass diff totmass = chirpmass / (eta**(3./5.)) diff = (totmass*totmass * (1-4*eta))**0.5 mass1 = (totmass + diff)/2. mass2 = (totmass - diff)/2. 
# Check the validity of the spin values # Do the first spin if maxSpinMag == 0: # Shortcut if non-spinning pass elif massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag): # Simple case where I don't have to worry about correlation with mass numploga = abs(spin1z) > massRangeParams.maxBHSpinMag spin1z[numploga] = 0 else: # Do have to consider masses boundary_mass = massRangeParams.ns_bh_boundary_mass numploga1 = numpy.logical_and(mass1 >= boundary_mass, abs(spin1z) <= massRangeParams.maxBHSpinMag) numploga2 = numpy.logical_and(mass1 < boundary_mass, abs(spin1z) <= massRangeParams.maxNSSpinMag) numploga = numpy.logical_or(numploga1, numploga2) numploga = numpy.logical_not(numploga) spin1z[numploga] = 0 # Same for the second spin if maxSpinMag == 0: # Shortcut if non-spinning pass elif massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag): numplogb = abs(spin2z) > massRangeParams.maxNSSpinMag spin2z[numplogb] = 0 else: # Do have to consider masses boundary_mass = massRangeParams.ns_bh_boundary_mass numplogb1 = numpy.logical_and(mass2 >= boundary_mass, abs(spin2z) <= massRangeParams.maxBHSpinMag) numplogb2 = numpy.logical_and(mass2 < boundary_mass, abs(spin2z) <= massRangeParams.maxNSSpinMag) numplogb = numpy.logical_or(numplogb1, numplogb2) numplogb = numpy.logical_not(numplogb) spin2z[numplogb] = 0 if (maxSpinMag) and (numploga[0] or numplogb[0]): raise ValueError("Cannot remove the guide point!") # And remove points where the individual masses are outside of the physical # range. Or the total masses are. # These "removed" points will have metric distances that will be much, much # larger than any thresholds used in the functions in brute_force_utils.py # and will always be rejected. An unphysical value cannot be used as it # would result in unphysical metric distances and cause failures. totmass[mass1 < massRangeParams.minMass1] = 0.0001 totmass[mass1 > massRangeParams.maxMass1] = 0.0001 totmass[mass2 < massRangeParams.minMass2] = 0.0001 totmass[mass2 > massRangeParams.maxMass2] = 0.0001 # There is some numerical error which can push this a bit higher. We do # *not* want to reject the initial guide point. This error comes from # Masses -> totmass, eta -> masses conversion, we will have points pushing # onto the boudaries of the space. totmass[totmass > massRangeParams.maxTotMass*1.0001] = 0.0001 totmass[totmass < massRangeParams.minTotMass*0.9999] = 0.0001 if massRangeParams.max_chirp_mass: totmass[chirpmass > massRangeParams.max_chirp_mass*1.0001] = 0.0001 if massRangeParams.min_chirp_mass: totmass[chirpmass < massRangeParams.min_chirp_mass*0.9999] = 0.0001 if totmass[0] < 0.00011: raise ValueError("Cannot remove the guide point!") mass1[totmass < 0.00011] = 0.0001 mass2[totmass < 0.00011] = 0.0001 # Then map to xis new_xis = get_cov_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper) return totmass, eta, spin1z, spin2z, mass1, mass2, new_xis
[ "def", "get_mass_distribution", "(", "bestMasses", ",", "scaleFactor", ",", "massRangeParams", ",", "metricParams", ",", "fUpper", ",", "numJumpPoints", "=", "100", ",", "chirpMassJumpFac", "=", "0.0001", ",", "etaJumpFac", "=", "0.01", ",", "spin1zJumpFac", "=", "0.01", ",", "spin2zJumpFac", "=", "0.01", ")", ":", "# FIXME: It would be better if rejected values could be drawn from the ", "# full possible mass/spin distribution. However speed in this function is", "# a major factor and must be considered.", "bestChirpmass", "=", "bestMasses", "[", "0", "]", "bestEta", "=", "bestMasses", "[", "1", "]", "bestSpin1z", "=", "bestMasses", "[", "2", "]", "bestSpin2z", "=", "bestMasses", "[", "3", "]", "# Firstly choose a set of values for masses and spins", "chirpmass", "=", "bestChirpmass", "*", "(", "1", "-", "(", "numpy", ".", "random", ".", "random", "(", "numJumpPoints", ")", "-", "0.5", ")", "*", "chirpMassJumpFac", "*", "scaleFactor", ")", "etaRange", "=", "massRangeParams", ".", "maxEta", "-", "massRangeParams", ".", "minEta", "currJumpFac", "=", "etaJumpFac", "*", "scaleFactor", "if", "currJumpFac", ">", "etaRange", ":", "currJumpFac", "=", "etaRange", "eta", "=", "bestEta", "*", "(", "1", "-", "(", "numpy", ".", "random", ".", "random", "(", "numJumpPoints", ")", "-", "0.5", ")", "*", "currJumpFac", ")", "maxSpinMag", "=", "max", "(", "massRangeParams", ".", "maxNSSpinMag", ",", "massRangeParams", ".", "maxBHSpinMag", ")", "minSpinMag", "=", "min", "(", "massRangeParams", ".", "maxNSSpinMag", ",", "massRangeParams", ".", "maxBHSpinMag", ")", "# Note that these two are cranged by spinxzFac, *not* spinxzFac/spinxz", "currJumpFac", "=", "spin1zJumpFac", "*", "scaleFactor", "if", "currJumpFac", ">", "maxSpinMag", ":", "currJumpFac", "=", "maxSpinMag", "# Actually set the new spin trial points", "if", "massRangeParams", ".", "nsbhFlag", "or", "(", "maxSpinMag", "==", "minSpinMag", ")", ":", "curr_spin_1z_jump_fac", "=", "currJumpFac", "curr_spin_2z_jump_fac", "=", "currJumpFac", "# Check spins aren't going to be unphysical", "if", "currJumpFac", ">", "massRangeParams", ".", "maxBHSpinMag", ":", "curr_spin_1z_jump_fac", "=", "massRangeParams", ".", "maxBHSpinMag", "if", "currJumpFac", ">", "massRangeParams", ".", "maxNSSpinMag", ":", "curr_spin_2z_jump_fac", "=", "massRangeParams", ".", "maxNSSpinMag", "spin1z", "=", "bestSpin1z", "+", "(", "(", "numpy", ".", "random", ".", "random", "(", "numJumpPoints", ")", "-", "0.5", ")", "*", "curr_spin_1z_jump_fac", ")", "spin2z", "=", "bestSpin2z", "+", "(", "(", "numpy", ".", "random", ".", "random", "(", "numJumpPoints", ")", "-", "0.5", ")", "*", "curr_spin_2z_jump_fac", ")", "else", ":", "# If maxNSSpinMag is very low (0) and maxBHSpinMag is high we can", "# find it hard to place any points. 
So mix these when", "# masses are swapping between the NS and BH.", "curr_spin_bh_jump_fac", "=", "currJumpFac", "curr_spin_ns_jump_fac", "=", "currJumpFac", "# Check spins aren't going to be unphysical", "if", "currJumpFac", ">", "massRangeParams", ".", "maxBHSpinMag", ":", "curr_spin_bh_jump_fac", "=", "massRangeParams", ".", "maxBHSpinMag", "if", "currJumpFac", ">", "massRangeParams", ".", "maxNSSpinMag", ":", "curr_spin_ns_jump_fac", "=", "massRangeParams", ".", "maxNSSpinMag", "spin1z", "=", "numpy", ".", "zeros", "(", "numJumpPoints", ",", "dtype", "=", "float", ")", "spin2z", "=", "numpy", ".", "zeros", "(", "numJumpPoints", ",", "dtype", "=", "float", ")", "split_point", "=", "int", "(", "numJumpPoints", "/", "2", ")", "# So set the first half to be at least within the BH range and the", "# second half to be at least within the NS range", "spin1z", "[", ":", "split_point", "]", "=", "bestSpin1z", "+", "(", "(", "numpy", ".", "random", ".", "random", "(", "split_point", ")", "-", "0.5", ")", "*", "curr_spin_bh_jump_fac", ")", "spin1z", "[", "split_point", ":", "]", "=", "bestSpin1z", "+", "(", "(", "numpy", ".", "random", ".", "random", "(", "numJumpPoints", "-", "split_point", ")", "-", "0.5", ")", "*", "curr_spin_ns_jump_fac", ")", "spin2z", "[", ":", "split_point", "]", "=", "bestSpin2z", "+", "(", "(", "numpy", ".", "random", ".", "random", "(", "split_point", ")", "-", "0.5", ")", "*", "curr_spin_bh_jump_fac", ")", "spin2z", "[", "split_point", ":", "]", "=", "bestSpin2z", "+", "(", "(", "numpy", ".", "random", ".", "random", "(", "numJumpPoints", "-", "split_point", ")", "-", "0.5", ")", "*", "curr_spin_ns_jump_fac", ")", "# Point[0] is always set to the original point", "chirpmass", "[", "0", "]", "=", "bestChirpmass", "eta", "[", "0", "]", "=", "bestEta", "spin1z", "[", "0", "]", "=", "bestSpin1z", "spin2z", "[", "0", "]", "=", "bestSpin2z", "# Remove points where eta becomes unphysical", "eta", "[", "eta", ">", "massRangeParams", ".", "maxEta", "]", "=", "massRangeParams", ".", "maxEta", "if", "massRangeParams", ".", "minEta", ":", "eta", "[", "eta", "<", "massRangeParams", ".", "minEta", "]", "=", "massRangeParams", ".", "minEta", "else", ":", "eta", "[", "eta", "<", "0.0001", "]", "=", "0.0001", "# Total mass, masses and mass diff", "totmass", "=", "chirpmass", "/", "(", "eta", "**", "(", "3.", "/", "5.", ")", ")", "diff", "=", "(", "totmass", "*", "totmass", "*", "(", "1", "-", "4", "*", "eta", ")", ")", "**", "0.5", "mass1", "=", "(", "totmass", "+", "diff", ")", "/", "2.", "mass2", "=", "(", "totmass", "-", "diff", ")", "/", "2.", "# Check the validity of the spin values", "# Do the first spin", "if", "maxSpinMag", "==", "0", ":", "# Shortcut if non-spinning", "pass", "elif", "massRangeParams", ".", "nsbhFlag", "or", "(", "maxSpinMag", "==", "minSpinMag", ")", ":", "# Simple case where I don't have to worry about correlation with mass", "numploga", "=", "abs", "(", "spin1z", ")", ">", "massRangeParams", ".", "maxBHSpinMag", "spin1z", "[", "numploga", "]", "=", "0", "else", ":", "# Do have to consider masses", "boundary_mass", "=", "massRangeParams", ".", "ns_bh_boundary_mass", "numploga1", "=", "numpy", ".", "logical_and", "(", "mass1", ">=", "boundary_mass", ",", "abs", "(", "spin1z", ")", "<=", "massRangeParams", ".", "maxBHSpinMag", ")", "numploga2", "=", "numpy", ".", "logical_and", "(", "mass1", "<", "boundary_mass", ",", "abs", "(", "spin1z", ")", "<=", "massRangeParams", ".", "maxNSSpinMag", ")", "numploga", "=", "numpy", ".", "logical_or", "(", "numploga1", 
",", "numploga2", ")", "numploga", "=", "numpy", ".", "logical_not", "(", "numploga", ")", "spin1z", "[", "numploga", "]", "=", "0", "# Same for the second spin", "if", "maxSpinMag", "==", "0", ":", "# Shortcut if non-spinning", "pass", "elif", "massRangeParams", ".", "nsbhFlag", "or", "(", "maxSpinMag", "==", "minSpinMag", ")", ":", "numplogb", "=", "abs", "(", "spin2z", ")", ">", "massRangeParams", ".", "maxNSSpinMag", "spin2z", "[", "numplogb", "]", "=", "0", "else", ":", "# Do have to consider masses", "boundary_mass", "=", "massRangeParams", ".", "ns_bh_boundary_mass", "numplogb1", "=", "numpy", ".", "logical_and", "(", "mass2", ">=", "boundary_mass", ",", "abs", "(", "spin2z", ")", "<=", "massRangeParams", ".", "maxBHSpinMag", ")", "numplogb2", "=", "numpy", ".", "logical_and", "(", "mass2", "<", "boundary_mass", ",", "abs", "(", "spin2z", ")", "<=", "massRangeParams", ".", "maxNSSpinMag", ")", "numplogb", "=", "numpy", ".", "logical_or", "(", "numplogb1", ",", "numplogb2", ")", "numplogb", "=", "numpy", ".", "logical_not", "(", "numplogb", ")", "spin2z", "[", "numplogb", "]", "=", "0", "if", "(", "maxSpinMag", ")", "and", "(", "numploga", "[", "0", "]", "or", "numplogb", "[", "0", "]", ")", ":", "raise", "ValueError", "(", "\"Cannot remove the guide point!\"", ")", "# And remove points where the individual masses are outside of the physical", "# range. Or the total masses are.", "# These \"removed\" points will have metric distances that will be much, much", "# larger than any thresholds used in the functions in brute_force_utils.py", "# and will always be rejected. An unphysical value cannot be used as it", "# would result in unphysical metric distances and cause failures.", "totmass", "[", "mass1", "<", "massRangeParams", ".", "minMass1", "]", "=", "0.0001", "totmass", "[", "mass1", ">", "massRangeParams", ".", "maxMass1", "]", "=", "0.0001", "totmass", "[", "mass2", "<", "massRangeParams", ".", "minMass2", "]", "=", "0.0001", "totmass", "[", "mass2", ">", "massRangeParams", ".", "maxMass2", "]", "=", "0.0001", "# There is some numerical error which can push this a bit higher. We do", "# *not* want to reject the initial guide point. This error comes from", "# Masses -> totmass, eta -> masses conversion, we will have points pushing", "# onto the boudaries of the space.", "totmass", "[", "totmass", ">", "massRangeParams", ".", "maxTotMass", "*", "1.0001", "]", "=", "0.0001", "totmass", "[", "totmass", "<", "massRangeParams", ".", "minTotMass", "*", "0.9999", "]", "=", "0.0001", "if", "massRangeParams", ".", "max_chirp_mass", ":", "totmass", "[", "chirpmass", ">", "massRangeParams", ".", "max_chirp_mass", "*", "1.0001", "]", "=", "0.0001", "if", "massRangeParams", ".", "min_chirp_mass", ":", "totmass", "[", "chirpmass", "<", "massRangeParams", ".", "min_chirp_mass", "*", "0.9999", "]", "=", "0.0001", "if", "totmass", "[", "0", "]", "<", "0.00011", ":", "raise", "ValueError", "(", "\"Cannot remove the guide point!\"", ")", "mass1", "[", "totmass", "<", "0.00011", "]", "=", "0.0001", "mass2", "[", "totmass", "<", "0.00011", "]", "=", "0.0001", "# Then map to xis", "new_xis", "=", "get_cov_params", "(", "mass1", ",", "mass2", ",", "spin1z", ",", "spin2z", ",", "metricParams", ",", "fUpper", ")", "return", "totmass", ",", "eta", ",", "spin1z", ",", "spin2z", ",", "mass1", ",", "mass2", ",", "new_xis" ]
Given a set of masses, this function will create a set of points nearby in the mass space and map these to the xi space. Parameters ----------- bestMasses : list Contains [ChirpMass, eta, spin1z, spin2z]. Points will be placed around tjos scaleFactor : float This parameter describes the radius away from bestMasses that points will be placed in. massRangeParams : massRangeParameters instance Instance holding all the details of mass ranges and spin ranges. metricParams : metricParameters instance Structure holding all the options for construction of the metric and the eigenvalues, eigenvectors and covariance matrix needed to manipulate the space. fUpper : float The value of fUpper that was used when obtaining the xi_i coordinates. This lets us know how to rotate potential physical points into the correct xi_i space. This must be a key in metricParams.evals, metricParams.evecs and metricParams.evecsCV (ie. we must know how to do the transformation for the given value of fUpper) numJumpPoints : int, optional (default = 100) The number of points that will be generated every iteration chirpMassJumpFac : float, optional (default=0.0001) The jump points will be chosen with fractional variation in chirpMass up to this multiplied by scaleFactor. etaJumpFac : float, optional (default=0.01) The jump points will be chosen with fractional variation in eta up to this multiplied by scaleFactor. spin1zJumpFac : float, optional (default=0.01) The jump points will be chosen with absolute variation in spin1z up to this multiplied by scaleFactor. spin2zJumpFac : float, optional (default=0.01) The jump points will be chosen with absolute variation in spin2z up to this multiplied by scaleFactor. Returns -------- Totmass : numpy.array Total mass of the resulting points Eta : numpy.array Symmetric mass ratio of the resulting points Spin1z : numpy.array Spin of the heavier body of the resulting points Spin2z : numpy.array Spin of the smaller body of the resulting points Diff : numpy.array Mass1 - Mass2 of the resulting points Mass1 : numpy.array Mass1 (mass of heavier body) of the resulting points Mass2 : numpy.array Mass2 (mass of smaller body) of the resulting points new_xis : list of numpy.array Position of points in the xi coordinates
[ "Given", "a", "set", "of", "masses", "this", "function", "will", "create", "a", "set", "of", "points", "nearby", "in", "the", "mass", "space", "and", "map", "these", "to", "the", "xi", "space", "." ]
python
train
tanghaibao/jcvi
jcvi/formats/sam.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sam.py#L340-L383
def merge(args): """ %prog merge merged_bams bams1_dir bams2_dir ... Merge BAM files. Treat the bams with the same prefix as a set. Output the commands first. """ from jcvi.apps.grid import MakeManager p = OptionParser(merge.__doc__) p.set_sep(sep="_", help="Separator to group per prefix") opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) merged_bams = args[0] bamdirs = args[1:] mkdir(merged_bams) bams = [] for x in bamdirs: bams += glob(op.join(x, "*.bam")) bams = [x for x in bams if "nsorted" not in x] logging.debug("Found a total of {0} BAM files.".format(len(bams))) sep = opts.sep key = lambda x: op.basename(x).split(sep)[0] bams.sort(key=key) mm = MakeManager() for prefix, files in groupby(bams, key=key): files = sorted(list(files)) nfiles = len(files) source = " ".join(files) target = op.join(merged_bams, op.basename(files[0])) if nfiles == 1: source = get_abs_path(source) cmd = "ln -s {0} {1}".format(source, target) mm.add("", target, cmd) else: cmd = "samtools merge -@ 8 {0} {1}".format(target, source) mm.add(files, target, cmd, remove=True) mm.write()
[ "def", "merge", "(", "args", ")", ":", "from", "jcvi", ".", "apps", ".", "grid", "import", "MakeManager", "p", "=", "OptionParser", "(", "merge", ".", "__doc__", ")", "p", ".", "set_sep", "(", "sep", "=", "\"_\"", ",", "help", "=", "\"Separator to group per prefix\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "merged_bams", "=", "args", "[", "0", "]", "bamdirs", "=", "args", "[", "1", ":", "]", "mkdir", "(", "merged_bams", ")", "bams", "=", "[", "]", "for", "x", "in", "bamdirs", ":", "bams", "+=", "glob", "(", "op", ".", "join", "(", "x", ",", "\"*.bam\"", ")", ")", "bams", "=", "[", "x", "for", "x", "in", "bams", "if", "\"nsorted\"", "not", "in", "x", "]", "logging", ".", "debug", "(", "\"Found a total of {0} BAM files.\"", ".", "format", "(", "len", "(", "bams", ")", ")", ")", "sep", "=", "opts", ".", "sep", "key", "=", "lambda", "x", ":", "op", ".", "basename", "(", "x", ")", ".", "split", "(", "sep", ")", "[", "0", "]", "bams", ".", "sort", "(", "key", "=", "key", ")", "mm", "=", "MakeManager", "(", ")", "for", "prefix", ",", "files", "in", "groupby", "(", "bams", ",", "key", "=", "key", ")", ":", "files", "=", "sorted", "(", "list", "(", "files", ")", ")", "nfiles", "=", "len", "(", "files", ")", "source", "=", "\" \"", ".", "join", "(", "files", ")", "target", "=", "op", ".", "join", "(", "merged_bams", ",", "op", ".", "basename", "(", "files", "[", "0", "]", ")", ")", "if", "nfiles", "==", "1", ":", "source", "=", "get_abs_path", "(", "source", ")", "cmd", "=", "\"ln -s {0} {1}\"", ".", "format", "(", "source", ",", "target", ")", "mm", ".", "add", "(", "\"\"", ",", "target", ",", "cmd", ")", "else", ":", "cmd", "=", "\"samtools merge -@ 8 {0} {1}\"", ".", "format", "(", "target", ",", "source", ")", "mm", ".", "add", "(", "files", ",", "target", ",", "cmd", ",", "remove", "=", "True", ")", "mm", ".", "write", "(", ")" ]
%prog merge merged_bams bams1_dir bams2_dir ... Merge BAM files. Treat the bams with the same prefix as a set. Output the commands first.
[ "%prog", "merge", "merged_bams", "bams1_dir", "bams2_dir", "..." ]
python
train
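A usage sketch for the merge() entry point above; the directory names are hypothetical, and per the code the function only writes the samtools merge commands out through MakeManager rather than executing them:
# Hypothetical invocation mirroring "%prog merge merged_bams bams1_dir bams2_dir ..."
from jcvi.formats.sam import merge
merge(["merged_bams", "flowcell1_bams", "flowcell2_bams"])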
josuebrunel/yahoo-oauth
yahoo_oauth/yahoo_oauth.py
https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/yahoo_oauth.py#L201-L211
def token_is_valid(self,): """Check the validity of the token :3600s """ elapsed_time = time.time() - self.token_time logger.debug("ELAPSED TIME : {0}".format(elapsed_time)) if elapsed_time > 3540: # 1 minute before it expires logger.debug("TOKEN HAS EXPIRED") return False logger.debug("TOKEN IS STILL VALID") return True
[ "def", "token_is_valid", "(", "self", ",", ")", ":", "elapsed_time", "=", "time", ".", "time", "(", ")", "-", "self", ".", "token_time", "logger", ".", "debug", "(", "\"ELAPSED TIME : {0}\"", ".", "format", "(", "elapsed_time", ")", ")", "if", "elapsed_time", ">", "3540", ":", "# 1 minute before it expires", "logger", ".", "debug", "(", "\"TOKEN HAS EXPIRED\"", ")", "return", "False", "logger", ".", "debug", "(", "\"TOKEN IS STILL VALID\"", ")", "return", "True" ]
Check the validity of the token: 3600s
[ "Check", "the", "validity", "of", "the", "token", ":", "3600s" ]
python
valid
boatd/python-boatd
boatdclient/boatd_client.py
https://github.com/boatd/python-boatd/blob/404ff0d0c389f6ed84ddbfea1c41db6569ad2ed4/boatdclient/boatd_client.py#L152-L161
def set_sail(self, angle): ''' Set the angle of the sail to `angle` degrees :param angle: sail angle :type angle: float between -90 and 90 ''' angle = float(angle) request = self.boatd.post({'value': float(angle)}, '/sail') return request.get('result')
[ "def", "set_sail", "(", "self", ",", "angle", ")", ":", "angle", "=", "float", "(", "angle", ")", "request", "=", "self", ".", "boatd", ".", "post", "(", "{", "'value'", ":", "float", "(", "angle", ")", "}", ",", "'/sail'", ")", "return", "request", ".", "get", "(", "'result'", ")" ]
Set the angle of the sail to `angle` degrees :param angle: sail angle :type angle: float between -90 and 90
[ "Set", "the", "angle", "of", "the", "sail", "to", "angle", "degrees" ]
python
train
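A usage sketch for set_sail(); the boat object stands for a connected boatdclient Boat instance, which is an assumption beyond the method shown here:
# Angle is a float in degrees, expected between -90 and 90.
boat.set_sail(45.0)    # sheet out to 45 degrees
boat.set_sail(-30)     # negative angles swing the sail the other way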
pandas-dev/pandas
pandas/io/formats/format.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1015-L1054
def _value_formatter(self, float_format=None, threshold=None): """Returns a function to be applied on each value to format it """ # the float_format parameter supersedes self.float_format if float_format is None: float_format = self.float_format # we are going to compose different functions, to first convert to # a string, then replace the decimal symbol, and finally chop according # to the threshold # when there is no float_format, we use str instead of '%g' # because str(0.0) = '0.0' while '%g' % 0.0 = '0' if float_format: def base_formatter(v): return float_format(value=v) if notna(v) else self.na_rep else: def base_formatter(v): return str(v) if notna(v) else self.na_rep if self.decimal != '.': def decimal_formatter(v): return base_formatter(v).replace('.', self.decimal, 1) else: decimal_formatter = base_formatter if threshold is None: return decimal_formatter def formatter(value): if notna(value): if abs(value) > threshold: return decimal_formatter(value) else: return decimal_formatter(0.0) else: return self.na_rep return formatter
[ "def", "_value_formatter", "(", "self", ",", "float_format", "=", "None", ",", "threshold", "=", "None", ")", ":", "# the float_format parameter supersedes self.float_format", "if", "float_format", "is", "None", ":", "float_format", "=", "self", ".", "float_format", "# we are going to compose different functions, to first convert to", "# a string, then replace the decimal symbol, and finally chop according", "# to the threshold", "# when there is no float_format, we use str instead of '%g'", "# because str(0.0) = '0.0' while '%g' % 0.0 = '0'", "if", "float_format", ":", "def", "base_formatter", "(", "v", ")", ":", "return", "float_format", "(", "value", "=", "v", ")", "if", "notna", "(", "v", ")", "else", "self", ".", "na_rep", "else", ":", "def", "base_formatter", "(", "v", ")", ":", "return", "str", "(", "v", ")", "if", "notna", "(", "v", ")", "else", "self", ".", "na_rep", "if", "self", ".", "decimal", "!=", "'.'", ":", "def", "decimal_formatter", "(", "v", ")", ":", "return", "base_formatter", "(", "v", ")", ".", "replace", "(", "'.'", ",", "self", ".", "decimal", ",", "1", ")", "else", ":", "decimal_formatter", "=", "base_formatter", "if", "threshold", "is", "None", ":", "return", "decimal_formatter", "def", "formatter", "(", "value", ")", ":", "if", "notna", "(", "value", ")", ":", "if", "abs", "(", "value", ")", ">", "threshold", ":", "return", "decimal_formatter", "(", "value", ")", "else", ":", "return", "decimal_formatter", "(", "0.0", ")", "else", ":", "return", "self", ".", "na_rep", "return", "formatter" ]
Returns a function to be applied on each value to format it
[ "Returns", "a", "function", "to", "be", "applied", "on", "each", "value", "to", "format", "it" ]
python
train
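The composition in _value_formatter() (base string conversion, then decimal substitution, then threshold chopping) can be illustrated with a standalone sketch; this is simplified illustration code, not pandas' internal API:
def make_formatter(float_format=None, decimal='.', threshold=None, na_rep='NaN'):
    # Base conversion: printf-style here for brevity; pandas also accepts callables.
    def base(v):
        if v != v:                       # cheap NaN check
            return na_rep
        return (float_format % v) if float_format else str(v)
    # Optional decimal-symbol substitution (only the first '.' is replaced).
    def with_decimal(v):
        s = base(v)
        return s.replace('.', decimal, 1) if decimal != '.' else s
    if threshold is None:
        return with_decimal
    # Values at or below the threshold are displayed as zero.
    def chopped(v):
        if v != v:
            return na_rep
        return with_decimal(v if abs(v) > threshold else 0.0)
    return chopped

fmt = make_formatter(float_format='%.2f', decimal=',', threshold=1e-6)
print(fmt(3.14159))   # 3,14
print(fmt(1e-9))      # 0,00  (chopped to zero)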
openstax/cnx-epub
cnxepub/adapters.py
https://github.com/openstax/cnx-epub/blob/f648a309eff551b0a68a115a98ddf7858149a2ea/cnxepub/adapters.py#L227-L261
def _node_to_model(tree_or_item, package, parent=None, lucent_id=TRANSLUCENT_BINDER_ID): """Given a tree, parse to a set of models""" if 'contents' in tree_or_item: # It is a binder. tree = tree_or_item # Grab the package metadata, so we have required license info metadata = package.metadata.copy() if tree['id'] == lucent_id: metadata['title'] = tree['title'] binder = TranslucentBinder(metadata=metadata) else: try: package_item = package.grab_by_name(tree['id']) binder = BinderItem(package_item, package) except KeyError: # Translucent w/ id metadata.update({ 'title': tree['title'], 'cnx-archive-uri': tree['id'], 'cnx-archive-shortid': tree['shortId']}) binder = Binder(tree['id'], metadata=metadata) for item in tree['contents']: node = _node_to_model(item, package, parent=binder, lucent_id=lucent_id) if node.metadata['title'] != item['title']: binder.set_title_for_node(node, item['title']) result = binder else: # It is a document. item = tree_or_item package_item = package.grab_by_name(item['id']) result = adapt_item(package_item, package) if parent is not None: parent.append(result) return result
[ "def", "_node_to_model", "(", "tree_or_item", ",", "package", ",", "parent", "=", "None", ",", "lucent_id", "=", "TRANSLUCENT_BINDER_ID", ")", ":", "if", "'contents'", "in", "tree_or_item", ":", "# It is a binder.", "tree", "=", "tree_or_item", "# Grab the package metadata, so we have required license info", "metadata", "=", "package", ".", "metadata", ".", "copy", "(", ")", "if", "tree", "[", "'id'", "]", "==", "lucent_id", ":", "metadata", "[", "'title'", "]", "=", "tree", "[", "'title'", "]", "binder", "=", "TranslucentBinder", "(", "metadata", "=", "metadata", ")", "else", ":", "try", ":", "package_item", "=", "package", ".", "grab_by_name", "(", "tree", "[", "'id'", "]", ")", "binder", "=", "BinderItem", "(", "package_item", ",", "package", ")", "except", "KeyError", ":", "# Translucent w/ id", "metadata", ".", "update", "(", "{", "'title'", ":", "tree", "[", "'title'", "]", ",", "'cnx-archive-uri'", ":", "tree", "[", "'id'", "]", ",", "'cnx-archive-shortid'", ":", "tree", "[", "'shortId'", "]", "}", ")", "binder", "=", "Binder", "(", "tree", "[", "'id'", "]", ",", "metadata", "=", "metadata", ")", "for", "item", "in", "tree", "[", "'contents'", "]", ":", "node", "=", "_node_to_model", "(", "item", ",", "package", ",", "parent", "=", "binder", ",", "lucent_id", "=", "lucent_id", ")", "if", "node", ".", "metadata", "[", "'title'", "]", "!=", "item", "[", "'title'", "]", ":", "binder", ".", "set_title_for_node", "(", "node", ",", "item", "[", "'title'", "]", ")", "result", "=", "binder", "else", ":", "# It is a document.", "item", "=", "tree_or_item", "package_item", "=", "package", ".", "grab_by_name", "(", "item", "[", "'id'", "]", ")", "result", "=", "adapt_item", "(", "package_item", ",", "package", ")", "if", "parent", "is", "not", "None", ":", "parent", ".", "append", "(", "result", ")", "return", "result" ]
Given a tree, parse to a set of models
[ "Given", "a", "tree", "parse", "to", "a", "set", "of", "models" ]
python
train
aio-libs/sockjs
sockjs/session.py
https://github.com/aio-libs/sockjs/blob/e5d5f24f6a1377419b13199ecf631df66667bcbb/sockjs/session.py#L231-L239
def send_frame(self, frm): """send message frame to client.""" if self._debug: log.info("outgoing message: %s, %s", self.id, frm[:200]) if self.state != STATE_OPEN: return self._feed(FRAME_MESSAGE_BLOB, frm)
[ "def", "send_frame", "(", "self", ",", "frm", ")", ":", "if", "self", ".", "_debug", ":", "log", ".", "info", "(", "\"outgoing message: %s, %s\"", ",", "self", ".", "id", ",", "frm", "[", ":", "200", "]", ")", "if", "self", ".", "state", "!=", "STATE_OPEN", ":", "return", "self", ".", "_feed", "(", "FRAME_MESSAGE_BLOB", ",", "frm", ")" ]
send message frame to client.
[ "send", "message", "frame", "to", "client", "." ]
python
train
fy0/slim
slim/base/permission.py
https://github.com/fy0/slim/blob/9951a910750888dbe7dd3e98acae9c40efae0689/slim/base/permission.py#L130-L142
def add_common_check(self, actions, table, func): """ emitted before query :param actions: :param table: :param func: :return: """ self.common_checks.append([table, actions, func]) """def func(ability, user, action, available_columns: list): pass """
[ "def", "add_common_check", "(", "self", ",", "actions", ",", "table", ",", "func", ")", ":", "self", ".", "common_checks", ".", "append", "(", "[", "table", ",", "actions", ",", "func", "]", ")", "\"\"\"def func(ability, user, action, available_columns: list):\n pass\n \"\"\"" ]
emitted before query :param actions: :param table: :param func: :return:
[ "emitted", "before", "query", ":", "param", "actions", ":", ":", "param", "table", ":", ":", "param", "func", ":", ":", "return", ":" ]
python
valid
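A registration sketch based on the callback signature quoted inside add_common_check()'s own body; the ability object, the table name and the action strings are assumptions about the surrounding slim permission system:
# Hypothetical check that hides a sensitive column from non-admin users.
def hide_salary(ability, user, action, available_columns):
    is_admin = user is not None and getattr(user, 'role', None) == 'admin'
    if not is_admin and 'salary' in available_columns:
        available_columns.remove('salary')

ability.add_common_check(['query', 'read'], 'employees', hide_salary)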
albahnsen/CostSensitiveClassification
costcla/models/bagging.py
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/bagging.py#L134-L150
def _parallel_predict(estimators, estimators_features, X, n_classes, combination, estimators_weight): """Private function used to compute predictions within a job.""" n_samples = X.shape[0] pred = np.zeros((n_samples, n_classes)) n_estimators = len(estimators) for estimator, features, weight in zip(estimators, estimators_features, estimators_weight): # Resort to voting predictions = estimator.predict(X[:, features]) for i in range(n_samples): if combination == 'weighted_voting': pred[i, int(predictions[i])] += 1 * weight else: pred[i, int(predictions[i])] += 1 return pred
[ "def", "_parallel_predict", "(", "estimators", ",", "estimators_features", ",", "X", ",", "n_classes", ",", "combination", ",", "estimators_weight", ")", ":", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "pred", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", "n_classes", ")", ")", "n_estimators", "=", "len", "(", "estimators", ")", "for", "estimator", ",", "features", ",", "weight", "in", "zip", "(", "estimators", ",", "estimators_features", ",", "estimators_weight", ")", ":", "# Resort to voting", "predictions", "=", "estimator", ".", "predict", "(", "X", "[", ":", ",", "features", "]", ")", "for", "i", "in", "range", "(", "n_samples", ")", ":", "if", "combination", "==", "'weighted_voting'", ":", "pred", "[", "i", ",", "int", "(", "predictions", "[", "i", "]", ")", "]", "+=", "1", "*", "weight", "else", ":", "pred", "[", "i", ",", "int", "(", "predictions", "[", "i", "]", ")", "]", "+=", "1", "return", "pred" ]
Private function used to compute predictions within a job.
[ "Private", "function", "used", "to", "compute", "predictions", "within", "a", "job", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xscintillaedit/xscintillaedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xscintillaedit/xscintillaedit.py#L88-L104
def addBreakpoint( self, lineno = -1 ): """ Adds a breakpoint for the given line number to this edit. :note The lineno is 0-based, while the editor displays lines as a 1-based system. So, if you want to put a breakpoint at visual line 3, you would pass in lineno as 2 :param lineno | <int> """ if ( lineno == -1 ): lineno, colno = self.getCursorPosition() self.markerAdd(lineno, self._breakpointMarker) if ( not self.signalsBlocked() ): self.breakpointsChanged.emit()
[ "def", "addBreakpoint", "(", "self", ",", "lineno", "=", "-", "1", ")", ":", "if", "(", "lineno", "==", "-", "1", ")", ":", "lineno", ",", "colno", "=", "self", ".", "getCursorPosition", "(", ")", "self", ".", "markerAdd", "(", "lineno", ",", "self", ".", "_breakpointMarker", ")", "if", "(", "not", "self", ".", "signalsBlocked", "(", ")", ")", ":", "self", ".", "breakpointsChanged", ".", "emit", "(", ")" ]
Adds a breakpoint for the given line number to this edit. :note The lineno is 0-based, while the editor displays lines as a 1-based system. So, if you want to put a breakpoint at visual line 3, you would pass in lineno as 2 :param lineno | <int>
[ "Adds", "a", "breakpoint", "for", "the", "given", "line", "number", "to", "this", "edit", ".", ":", "note", "The", "lineno", "is", "0", "-", "based", "while", "the", "editor", "displays", "lines", "as", "a", "1", "-", "based", "system", ".", "So", "if", "you", "want", "to", "put", "a", "breakpoint", "at", "visual", "line", "3", "you", "would", "pass", "in", "lineno", "as", "2", ":", "param", "lineno", "|", "<int", ">" ]
python
train
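The 0-based/1-based distinction called out in the docstring, in practice (edit stands for an XScintillaEdit instance):
edit.addBreakpoint(2)   # marks the line the editor displays as line 3
edit.addBreakpoint()    # no argument: marks the current cursor line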
thombashi/typepy
typepy/type/_base.py
https://github.com/thombashi/typepy/blob/8209d1df4f2a7f196a9fa4bfb0708c5ff648461f/typepy/type/_base.py#L109-L121
def convert(self): """ :return: Converted value. :raises typepy.TypeConversionError: If the value cannot convert. """ if self.is_type(): return self.force_convert() raise TypeConversionError( "failed to convert from {} to {}".format(type(self._data).__name__, self.typename) )
[ "def", "convert", "(", "self", ")", ":", "if", "self", ".", "is_type", "(", ")", ":", "return", "self", ".", "force_convert", "(", ")", "raise", "TypeConversionError", "(", "\"failed to convert from {} to {}\"", ".", "format", "(", "type", "(", "self", ".", "_data", ")", ".", "__name__", ",", "self", ".", "typename", ")", ")" ]
:return: Converted value. :raises typepy.TypeConversionError: If the value cannot convert.
[ ":", "return", ":", "Converted", "value", ".", ":", "raises", "typepy", ".", "TypeConversionError", ":", "If", "the", "value", "cannot", "convert", "." ]
python
train
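A usage sketch of the convert() contract through a concrete type class; typepy.Integer is used as a representative example, and the import paths are assumed to be the public typepy ones:
from typepy import Integer, TypeConversionError

print(Integer(42).convert())            # 42
try:
    Integer("not a number").convert()   # cannot be interpreted as an integer
except TypeConversionError as exc:
    print("conversion failed:", exc)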
chrisjrn/registrasion
registrasion/forms.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/forms.py#L437-L459
def staff_products_form_factory(user): ''' Creates a StaffProductsForm that restricts the available products to those that are available to a user. ''' products = inventory.Product.objects.all() products = ProductController.available_products(user, products=products) product_ids = [product.id for product in products] product_set = inventory.Product.objects.filter(id__in=product_ids) class StaffProductsForm(forms.Form): ''' Form for allowing staff to add an item to a user's cart. ''' product = forms.ModelChoiceField( widget=forms.Select, queryset=product_set, ) quantity = forms.IntegerField( min_value=0, ) return StaffProductsForm
[ "def", "staff_products_form_factory", "(", "user", ")", ":", "products", "=", "inventory", ".", "Product", ".", "objects", ".", "all", "(", ")", "products", "=", "ProductController", ".", "available_products", "(", "user", ",", "products", "=", "products", ")", "product_ids", "=", "[", "product", ".", "id", "for", "product", "in", "products", "]", "product_set", "=", "inventory", ".", "Product", ".", "objects", ".", "filter", "(", "id__in", "=", "product_ids", ")", "class", "StaffProductsForm", "(", "forms", ".", "Form", ")", ":", "''' Form for allowing staff to add an item to a user's cart. '''", "product", "=", "forms", ".", "ModelChoiceField", "(", "widget", "=", "forms", ".", "Select", ",", "queryset", "=", "product_set", ",", ")", "quantity", "=", "forms", ".", "IntegerField", "(", "min_value", "=", "0", ",", ")", "return", "StaffProductsForm" ]
Creates a StaffProductsForm that restricts the available products to those that are available to a user.
[ "Creates", "a", "StaffProductsForm", "that", "restricts", "the", "available", "products", "to", "those", "that", "are", "available", "to", "a", "user", "." ]
python
test
ninuxorg/nodeshot
nodeshot/community/participation/models/__init__.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/community/participation/models/__init__.py#L160-L168
def create_layer_rating_settings(sender, **kwargs): """ create layer rating settings """ created = kwargs['created'] layer = kwargs['instance'] if created: # create layer participation settings # task will be executed in background unless settings.CELERY_ALWAYS_EAGER is True # if CELERY_ALWAYS_EAGER is False celery worker must be running otherwise task won't be executed create_related_object.delay(LayerParticipationSettings, {'layer': layer})
[ "def", "create_layer_rating_settings", "(", "sender", ",", "*", "*", "kwargs", ")", ":", "created", "=", "kwargs", "[", "'created'", "]", "layer", "=", "kwargs", "[", "'instance'", "]", "if", "created", ":", "# create layer participation settings", "# task will be executed in background unless settings.CELERY_ALWAYS_EAGER is True", "# if CELERY_ALWAYS_EAGER is False celery worker must be running otherwise task won't be executed", "create_related_object", ".", "delay", "(", "LayerParticipationSettings", ",", "{", "'layer'", ":", "layer", "}", ")" ]
create layer rating settings
[ "create", "layer", "rating", "settings" ]
python
train
utiasSTARS/pykitti
pykitti/raw.py
https://github.com/utiasSTARS/pykitti/blob/d3e1bb81676e831886726cc5ed79ce1f049aef2c/pykitti/raw.py#L225-L244
def _load_calib(self): """Load and compute intrinsic and extrinsic calibration parameters.""" # We'll build the calibration parameters as a dictionary, then # convert it to a namedtuple to prevent it from being modified later data = {} # Load the rigid transformation from IMU to velodyne data['T_velo_imu'] = self._load_calib_rigid('calib_imu_to_velo.txt') # Load the camera intrinsics and extrinsics data.update(self._load_calib_cam_to_cam( 'calib_velo_to_cam.txt', 'calib_cam_to_cam.txt')) # Pre-compute the IMU to rectified camera coordinate transforms data['T_cam0_imu'] = data['T_cam0_velo'].dot(data['T_velo_imu']) data['T_cam1_imu'] = data['T_cam1_velo'].dot(data['T_velo_imu']) data['T_cam2_imu'] = data['T_cam2_velo'].dot(data['T_velo_imu']) data['T_cam3_imu'] = data['T_cam3_velo'].dot(data['T_velo_imu']) self.calib = namedtuple('CalibData', data.keys())(*data.values())
[ "def", "_load_calib", "(", "self", ")", ":", "# We'll build the calibration parameters as a dictionary, then", "# convert it to a namedtuple to prevent it from being modified later", "data", "=", "{", "}", "# Load the rigid transformation from IMU to velodyne", "data", "[", "'T_velo_imu'", "]", "=", "self", ".", "_load_calib_rigid", "(", "'calib_imu_to_velo.txt'", ")", "# Load the camera intrinsics and extrinsics", "data", ".", "update", "(", "self", ".", "_load_calib_cam_to_cam", "(", "'calib_velo_to_cam.txt'", ",", "'calib_cam_to_cam.txt'", ")", ")", "# Pre-compute the IMU to rectified camera coordinate transforms", "data", "[", "'T_cam0_imu'", "]", "=", "data", "[", "'T_cam0_velo'", "]", ".", "dot", "(", "data", "[", "'T_velo_imu'", "]", ")", "data", "[", "'T_cam1_imu'", "]", "=", "data", "[", "'T_cam1_velo'", "]", ".", "dot", "(", "data", "[", "'T_velo_imu'", "]", ")", "data", "[", "'T_cam2_imu'", "]", "=", "data", "[", "'T_cam2_velo'", "]", ".", "dot", "(", "data", "[", "'T_velo_imu'", "]", ")", "data", "[", "'T_cam3_imu'", "]", "=", "data", "[", "'T_cam3_velo'", "]", ".", "dot", "(", "data", "[", "'T_velo_imu'", "]", ")", "self", ".", "calib", "=", "namedtuple", "(", "'CalibData'", ",", "data", ".", "keys", "(", ")", ")", "(", "*", "data", ".", "values", "(", ")", ")" ]
Load and compute intrinsic and extrinsic calibration parameters.
[ "Load", "and", "compute", "intrinsic", "and", "extrinsic", "calibration", "parameters", "." ]
python
train
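How the calibration namedtuple built by _load_calib() is typically consumed; the dataset path, date and drive number are hypothetical:
import pykitti

data = pykitti.raw('/data/kitti', '2011_09_26', '0001')   # hypothetical paths
T_cam0_imu = data.calib.T_cam0_imu   # 4x4 IMU -> rectified cam0 transform
T_velo_imu = data.calib.T_velo_imu   # rigid IMU -> velodyne transform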
lewisjared/credkeep
credkeep/util.py
https://github.com/lewisjared/credkeep/blob/63638ced094992552a28109b91839bcbbbe9230a/credkeep/util.py#L12-L27
def clear_to_enc_filename(fname): """ Converts the filename of a cleartext file and convert it to an encrypted filename :param fname: :return: filename of encrypted secret file if found, else None """ if not fname.lower().endswith('.json'): raise CredkeepException('Invalid filetype') if fname.lower().endswith('.enc.json'): raise CredkeepException('File already encrypted') enc_fname = fname[:-4] + 'enc.json' return enc_fname if exists(enc_fname) else None
[ "def", "clear_to_enc_filename", "(", "fname", ")", ":", "if", "not", "fname", ".", "lower", "(", ")", ".", "endswith", "(", "'.json'", ")", ":", "raise", "CredkeepException", "(", "'Invalid filetype'", ")", "if", "fname", ".", "lower", "(", ")", ".", "endswith", "(", "'.enc.json'", ")", ":", "raise", "CredkeepException", "(", "'File already encrypted'", ")", "enc_fname", "=", "fname", "[", ":", "-", "4", "]", "+", "'enc.json'", "return", "enc_fname", "if", "exists", "(", "enc_fname", ")", "else", "None" ]
Converts the filename of a cleartext file and converts it to an encrypted filename :param fname: :return: filename of encrypted secret file if found, else None
[ "Converts", "the", "filename", "of", "a", "cleartext", "file", "and", "convert", "it", "to", "an", "encrypted", "filename" ]
python
train
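The filename convention implied by clear_to_enc_filename(), as a short worked example (paths are hypothetical; the encrypted counterpart must already exist on disk for a non-None return):
# 'secrets.json'     -> returns 'secrets.enc.json' if that file exists, else None
# 'secrets.enc.json' -> raises CredkeepException (already encrypted)
# 'secrets.yaml'     -> raises CredkeepException (invalid filetype)
enc = clear_to_enc_filename('config/secrets.json')
if enc is None:
    print('no encrypted counterpart found')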
stephanepechard/projy
projy/templates/ProjyTemplateTemplate.py
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/templates/ProjyTemplateTemplate.py#L28-L35
def substitutes(self): """ Return the substitutions for the templating replacements. """ substitute_dict = dict( project = self.project_name, template = self.project_name + 'Template', file = self.project_name + 'FileTemplate', ) return substitute_dict
[ "def", "substitutes", "(", "self", ")", ":", "substitute_dict", "=", "dict", "(", "project", "=", "self", ".", "project_name", ",", "template", "=", "self", ".", "project_name", "+", "'Template'", ",", "file", "=", "self", ".", "project_name", "+", "'FileTemplate'", ",", ")", "return", "substitute_dict" ]
Return the substitutions for the templating replacements.
[ "Return", "the", "substitutions", "for", "the", "templating", "replacements", "." ]
python
train
eugene-eeo/signalsdb
signalsdb/api.py
https://github.com/eugene-eeo/signalsdb/blob/d6129c5790c89dec20bc8bfde66ef7b909a38146/signalsdb/api.py#L41-L58
def search(signal='', action='', signals=SIGNALS): """ Search the signals DB for signal named *signal*, and which action matches *action* in a case insensitive way. :param signal: Regex for signal name. :param action: Regex for default action. :param signals: Database of signals. """ sig_re = re.compile(signal, re.IGNORECASE) act_re = re.compile(action, re.IGNORECASE) res = [] for code in signals: sig, act, _ = signals[code] if sig_re.match(sig) and act_re.match(act): res.append(explain(code, signals=signals)) return res
[ "def", "search", "(", "signal", "=", "''", ",", "action", "=", "''", ",", "signals", "=", "SIGNALS", ")", ":", "sig_re", "=", "re", ".", "compile", "(", "signal", ",", "re", ".", "IGNORECASE", ")", "act_re", "=", "re", ".", "compile", "(", "action", ",", "re", ".", "IGNORECASE", ")", "res", "=", "[", "]", "for", "code", "in", "signals", ":", "sig", ",", "act", ",", "_", "=", "signals", "[", "code", "]", "if", "sig_re", ".", "match", "(", "sig", ")", "and", "act_re", ".", "match", "(", "act", ")", ":", "res", ".", "append", "(", "explain", "(", "code", ",", "signals", "=", "signals", ")", ")", "return", "res" ]
Search the signals DB for signal named *signal*, and which action matches *action* in a case insensitive way. :param signal: Regex for signal name. :param action: Regex for default action. :param signals: Database of signals.
[ "Search", "the", "signals", "DB", "for", "signal", "named", "*", "signal", "*", "and", "which", "action", "matches", "*", "action", "*", "in", "a", "case", "insensitive", "way", "." ]
python
train
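A usage sketch for search(); the concrete contents of the bundled SIGNALS table are not shown in this record, so the example output is illustrative only:
from signalsdb.api import search

# Case-insensitive, anchored regex match on signal name and default action.
for entry in search(signal='SIGT', action='term'):
    print(entry)   # e.g. the explain() record for SIGTERM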
Becksteinlab/GromacsWrapper
gromacs/core.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/core.py#L581-L606
def _build_arg_list(self, **kwargs): """Build list of arguments from the dict; keys must be valid gromacs flags.""" arglist = [] for flag, value in kwargs.items(): # XXX: check flag against allowed values flag = str(flag) if flag.startswith('_'): flag = flag[1:] # python-illegal keywords are '_'-quoted if not flag.startswith('-'): flag = '-' + flag # now flag is guaranteed to start with '-' if value is True: arglist.append(flag) # simple command line flag elif value is False: if flag.startswith('-no'): # negate a negated flag ('noX=False' --> X=True --> -X ... but who uses that?) arglist.append('-' + flag[3:]) else: arglist.append('-no' + flag[1:]) # gromacs switches booleans by prefixing 'no' elif value is None: pass # ignore flag = None else: try: arglist.extend([flag] + value) # option with value list except TypeError: arglist.extend([flag, value]) # option with single value return list(map(str, arglist))
[ "def", "_build_arg_list", "(", "self", ",", "*", "*", "kwargs", ")", ":", "arglist", "=", "[", "]", "for", "flag", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "# XXX: check flag against allowed values", "flag", "=", "str", "(", "flag", ")", "if", "flag", ".", "startswith", "(", "'_'", ")", ":", "flag", "=", "flag", "[", "1", ":", "]", "# python-illegal keywords are '_'-quoted", "if", "not", "flag", ".", "startswith", "(", "'-'", ")", ":", "flag", "=", "'-'", "+", "flag", "# now flag is guaranteed to start with '-'", "if", "value", "is", "True", ":", "arglist", ".", "append", "(", "flag", ")", "# simple command line flag", "elif", "value", "is", "False", ":", "if", "flag", ".", "startswith", "(", "'-no'", ")", ":", "# negate a negated flag ('noX=False' --> X=True --> -X ... but who uses that?)", "arglist", ".", "append", "(", "'-'", "+", "flag", "[", "3", ":", "]", ")", "else", ":", "arglist", ".", "append", "(", "'-no'", "+", "flag", "[", "1", ":", "]", ")", "# gromacs switches booleans by prefixing 'no'", "elif", "value", "is", "None", ":", "pass", "# ignore flag = None", "else", ":", "try", ":", "arglist", ".", "extend", "(", "[", "flag", "]", "+", "value", ")", "# option with value list", "except", "TypeError", ":", "arglist", ".", "extend", "(", "[", "flag", ",", "value", "]", ")", "# option with single value", "return", "list", "(", "map", "(", "str", ",", "arglist", ")", ")" ]
Build list of arguments from the dict; keys must be valid gromacs flags.
[ "Build", "list", "of", "arguments", "from", "the", "dict", ";", "keys", "must", "be", "valid", "gromacs", "flags", "." ]
python
valid
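The keyword-to-flag mapping in _build_arg_list() is easiest to see with concrete inputs; the following is an illustrative trace based only on the branches shown, not a call into a real gromacs tool:
# kwargs:  s="topol.tpr", dt=100, v=True, rerun=False, n=None, f=[1, 2]
# result:  ['-s', 'topol.tpr', '-dt', '100', '-v', '-norerun', '-f', '1', '2']
#   str/number -> flag followed by its value
#   True       -> bare flag, False -> 'no'-prefixed flag, None -> dropped
#   list       -> flag followed by every element, all coerced to str at the end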
awkman/pywifi
pywifi/iface.py
https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/iface.py#L80-L93
def network_profiles(self): """Get all the AP profiles.""" profiles = self._wifi_ctrl.network_profiles(self._raw_obj) if self._logger.isEnabledFor(logging.INFO): for profile in profiles: self._logger.info("Get profile:") self._logger.info("\tssid: %s", profile.ssid) self._logger.info("\tauth: %s", profile.auth) self._logger.info("\takm: %s", profile.akm) self._logger.info("\tcipher: %s", profile.cipher) return profiles
[ "def", "network_profiles", "(", "self", ")", ":", "profiles", "=", "self", ".", "_wifi_ctrl", ".", "network_profiles", "(", "self", ".", "_raw_obj", ")", "if", "self", ".", "_logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "for", "profile", "in", "profiles", ":", "self", ".", "_logger", ".", "info", "(", "\"Get profile:\"", ")", "self", ".", "_logger", ".", "info", "(", "\"\\tssid: %s\"", ",", "profile", ".", "ssid", ")", "self", ".", "_logger", ".", "info", "(", "\"\\tauth: %s\"", ",", "profile", ".", "auth", ")", "self", ".", "_logger", ".", "info", "(", "\"\\takm: %s\"", ",", "profile", ".", "akm", ")", "self", ".", "_logger", ".", "info", "(", "\"\\tcipher: %s\"", ",", "profile", ".", "cipher", ")", "return", "profiles" ]
Get all the AP profiles.
[ "Get", "all", "the", "AP", "profiles", "." ]
python
train
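A sketch reaching network_profiles() through the public pywifi entry points; taking interface index 0 assumes at least one wireless adapter is present:
import pywifi

wifi = pywifi.PyWiFi()
iface = wifi.interfaces()[0]
for profile in iface.network_profiles():
    print(profile.ssid, profile.auth, profile.akm, profile.cipher)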
ASMfreaK/habitipy
habitipy/api.py
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L287-L297
def download_api(branch=None) -> str: """download API documentation from _branch_ of Habitica\'s repo on Github""" habitica_github_api = 'https://api.github.com/repos/HabitRPG/habitica' if not branch: branch = requests.get(habitica_github_api + '/releases/latest').json()['tag_name'] curl = local['curl']['-sL', habitica_github_api + '/tarball/{}'.format(branch)] tar = local['tar'][ 'axzf', '-', '--wildcards', '*/website/server/controllers/api-v3/*', '--to-stdout'] grep = local['grep']['@api'] sed = local['sed']['-e', 's/^[ */]*//g', '-e', 's/ / /g', '-'] return (curl | tar | grep | sed)()
[ "def", "download_api", "(", "branch", "=", "None", ")", "->", "str", ":", "habitica_github_api", "=", "'https://api.github.com/repos/HabitRPG/habitica'", "if", "not", "branch", ":", "branch", "=", "requests", ".", "get", "(", "habitica_github_api", "+", "'/releases/latest'", ")", ".", "json", "(", ")", "[", "'tag_name'", "]", "curl", "=", "local", "[", "'curl'", "]", "[", "'-sL'", ",", "habitica_github_api", "+", "'/tarball/{}'", ".", "format", "(", "branch", ")", "]", "tar", "=", "local", "[", "'tar'", "]", "[", "'axzf'", ",", "'-'", ",", "'--wildcards'", ",", "'*/website/server/controllers/api-v3/*'", ",", "'--to-stdout'", "]", "grep", "=", "local", "[", "'grep'", "]", "[", "'@api'", "]", "sed", "=", "local", "[", "'sed'", "]", "[", "'-e'", ",", "'s/^[ */]*//g'", ",", "'-e'", ",", "'s/ / /g'", ",", "'-'", "]", "return", "(", "curl", "|", "tar", "|", "grep", "|", "sed", ")", "(", ")" ]
download API documentation from _branch_ of Habitica's repo on Github
[ "download", "API", "documentation", "from", "_branch_", "of", "Habitica", "\\", "s", "repo", "on", "Github" ]
python
train
thespacedoctor/transientNamer
transientNamer/search.py
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L820-L843
def _file_prefix( self): """*Generate a file prefix based on the type of search for saving files to disk* **Return:** - ``prefix`` -- the file prefix """ self.log.info('starting the ``_file_prefix`` method') if self.ra: now = datetime.now() prefix = now.strftime("%Y%m%dt%H%M%S%f_tns_conesearch_") elif self.name: prefix = self.name + "_tns_conesearch_" elif self.internal_name: prefix = self.internal_name + "_tns_conesearch_" elif self.discInLastDays: discInLastDays = str(self.discInLastDays) now = datetime.now() prefix = now.strftime( discInLastDays + "d_since_%Y%m%d_tns_conesearch_") self.log.info('completed the ``_file_prefix`` method') return prefix
[ "def", "_file_prefix", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``_file_prefix`` method'", ")", "if", "self", ".", "ra", ":", "now", "=", "datetime", ".", "now", "(", ")", "prefix", "=", "now", ".", "strftime", "(", "\"%Y%m%dt%H%M%S%f_tns_conesearch_\"", ")", "elif", "self", ".", "name", ":", "prefix", "=", "self", ".", "name", "+", "\"_tns_conesearch_\"", "elif", "self", ".", "internal_name", ":", "prefix", "=", "self", ".", "internal_name", "+", "\"_tns_conesearch_\"", "elif", "self", ".", "discInLastDays", ":", "discInLastDays", "=", "str", "(", "self", ".", "discInLastDays", ")", "now", "=", "datetime", ".", "now", "(", ")", "prefix", "=", "now", ".", "strftime", "(", "discInLastDays", "+", "\"d_since_%Y%m%d_tns_conesearch_\"", ")", "self", ".", "log", ".", "info", "(", "'completed the ``_file_prefix`` method'", ")", "return", "prefix" ]
*Generate a file prefix based on the type of search for saving files to disk* **Return:** - ``prefix`` -- the file prefix
[ "*", "Generate", "a", "file", "prefix", "based", "on", "the", "type", "of", "search", "for", "saving", "files", "to", "disk", "*" ]
python
train
tomer8007/kik-bot-api-unofficial
kik_unofficial/client.py
https://github.com/tomer8007/kik-bot-api-unofficial/blob/2ae5216bc05e7099a41895382fc8e428a7a5c3ac/kik_unofficial/client.py#L544-L565
def _kik_connection_thread_function(self): """ The Kik Connection thread main function. Initiates the asyncio loop and actually connects. """ # If there is already a connection going, than wait for it to stop if self.loop and self.loop.is_running(): self.loop.call_soon_threadsafe(self.connection.close) log.debug("[!] Waiting for the previous connection to stop.") while self.loop.is_running(): log.debug("[!] Still Waiting for the previous connection to stop.") time.sleep(1) log.info("[+] Initiating the Kik Connection thread and connecting to kik server...") # create the connection and launch the asyncio loop self.connection = KikConnection(self.loop, self) coro = self.loop.create_connection(lambda: self.connection, HOST, PORT, ssl=True) self.loop.run_until_complete(coro) log.debug("[!] Running main loop") self.loop.run_forever() log.debug("[!] Main loop ended.")
[ "def", "_kik_connection_thread_function", "(", "self", ")", ":", "# If there is already a connection going, than wait for it to stop", "if", "self", ".", "loop", "and", "self", ".", "loop", ".", "is_running", "(", ")", ":", "self", ".", "loop", ".", "call_soon_threadsafe", "(", "self", ".", "connection", ".", "close", ")", "log", ".", "debug", "(", "\"[!] Waiting for the previous connection to stop.\"", ")", "while", "self", ".", "loop", ".", "is_running", "(", ")", ":", "log", ".", "debug", "(", "\"[!] Still Waiting for the previous connection to stop.\"", ")", "time", ".", "sleep", "(", "1", ")", "log", ".", "info", "(", "\"[+] Initiating the Kik Connection thread and connecting to kik server...\"", ")", "# create the connection and launch the asyncio loop", "self", ".", "connection", "=", "KikConnection", "(", "self", ".", "loop", ",", "self", ")", "coro", "=", "self", ".", "loop", ".", "create_connection", "(", "lambda", ":", "self", ".", "connection", ",", "HOST", ",", "PORT", ",", "ssl", "=", "True", ")", "self", ".", "loop", ".", "run_until_complete", "(", "coro", ")", "log", ".", "debug", "(", "\"[!] Running main loop\"", ")", "self", ".", "loop", ".", "run_forever", "(", ")", "log", ".", "debug", "(", "\"[!] Main loop ended.\"", ")" ]
The Kik Connection thread main function. Initiates the asyncio loop and actually connects.
[ "The", "Kik", "Connection", "thread", "main", "function", ".", "Initiates", "the", "asyncio", "loop", "and", "actually", "connects", "." ]
python
train
artefactual-labs/agentarchives
agentarchives/atom/client.py
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/atom/client.py#L278-L304
def collection_list(self, resource_id, resource_type="collection"): """ Fetches a list of slug representing descriptions within the specified parent description. :param resource_id str: The slug of the description to fetch children from. :param resource_type str: no-op; not required or used in this implementation. :return: A list of strings representing the slugs for all children of the requested description. :rtype list: """ def fetch_children(children): results = [] for child in children: results.append(child["slug"]) if "children" in child: results.extend(fetch_children(child["children"])) return results response = self._get( urljoin(self.base_url, "informationobjects/tree/{}".format(resource_id)) ) tree = response.json() return fetch_children(tree["children"])
[ "def", "collection_list", "(", "self", ",", "resource_id", ",", "resource_type", "=", "\"collection\"", ")", ":", "def", "fetch_children", "(", "children", ")", ":", "results", "=", "[", "]", "for", "child", "in", "children", ":", "results", ".", "append", "(", "child", "[", "\"slug\"", "]", ")", "if", "\"children\"", "in", "child", ":", "results", ".", "extend", "(", "fetch_children", "(", "child", "[", "\"children\"", "]", ")", ")", "return", "results", "response", "=", "self", ".", "_get", "(", "urljoin", "(", "self", ".", "base_url", ",", "\"informationobjects/tree/{}\"", ".", "format", "(", "resource_id", ")", ")", ")", "tree", "=", "response", ".", "json", "(", ")", "return", "fetch_children", "(", "tree", "[", "\"children\"", "]", ")" ]
Fetches a list of slug representing descriptions within the specified parent description. :param resource_id str: The slug of the description to fetch children from. :param resource_type str: no-op; not required or used in this implementation. :return: A list of strings representing the slugs for all children of the requested description. :rtype list:
[ "Fetches", "a", "list", "of", "slug", "representing", "descriptions", "within", "the", "specified", "parent", "description", "." ]
python
train
google/dotty
efilter/parsers/common/grammar.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/grammar.py#L234-L242
def keyword(tokens, expected): """Case-insensitive keyword match.""" try: token = next(iter(tokens)) except StopIteration: return if token and token.name == "symbol" and token.value.lower() == expected: return TokenMatch(None, token.value, (token,))
[ "def", "keyword", "(", "tokens", ",", "expected", ")", ":", "try", ":", "token", "=", "next", "(", "iter", "(", "tokens", ")", ")", "except", "StopIteration", ":", "return", "if", "token", "and", "token", ".", "name", "==", "\"symbol\"", "and", "token", ".", "value", ".", "lower", "(", ")", "==", "expected", ":", "return", "TokenMatch", "(", "None", ",", "token", ".", "value", ",", "(", "token", ",", ")", ")" ]
Case-insensitive keyword match.
[ "Case", "-", "insensitive", "keyword", "match", "." ]
python
train
Skyscanner/pycfmodel
pycfmodel/model/resources/properties/policy_document.py
https://github.com/Skyscanner/pycfmodel/blob/e3da4db96f59c0a5dba06ae66ad25645775e5500/pycfmodel/model/resources/properties/policy_document.py#L191-L204
def wildcard_allowed_principals(self, pattern=None): """ Find statements which allow wildcard principals. A pattern can be specified for the wildcard principal """ wildcard_allowed = [] for statement in self.statements: if statement.wildcard_principals(pattern) and statement.effect == "Allow": wildcard_allowed.append(statement) return wildcard_allowed
[ "def", "wildcard_allowed_principals", "(", "self", ",", "pattern", "=", "None", ")", ":", "wildcard_allowed", "=", "[", "]", "for", "statement", "in", "self", ".", "statements", ":", "if", "statement", ".", "wildcard_principals", "(", "pattern", ")", "and", "statement", ".", "effect", "==", "\"Allow\"", ":", "wildcard_allowed", ".", "append", "(", "statement", ")", "return", "wildcard_allowed" ]
Find statements which allow wildcard principals. A pattern can be specified for the wildcard principal
[ "Find", "statements", "which", "allow", "wildcard", "principals", "." ]
python
train
sorgerlab/indra
indra/sources/reach/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L60-L75
def get_all_events(self): """Gather all event IDs in the REACH output by type. These IDs are stored in the self.all_events dict. """ self.all_events = {} events = self.tree.execute("$.events.frames") if events is None: return for e in events: event_type = e.get('type') frame_id = e.get('frame_id') try: self.all_events[event_type].append(frame_id) except KeyError: self.all_events[event_type] = [frame_id]
[ "def", "get_all_events", "(", "self", ")", ":", "self", ".", "all_events", "=", "{", "}", "events", "=", "self", ".", "tree", ".", "execute", "(", "\"$.events.frames\"", ")", "if", "events", "is", "None", ":", "return", "for", "e", "in", "events", ":", "event_type", "=", "e", ".", "get", "(", "'type'", ")", "frame_id", "=", "e", ".", "get", "(", "'frame_id'", ")", "try", ":", "self", ".", "all_events", "[", "event_type", "]", ".", "append", "(", "frame_id", ")", "except", "KeyError", ":", "self", ".", "all_events", "[", "event_type", "]", "=", "[", "frame_id", "]" ]
Gather all event IDs in the REACH output by type. These IDs are stored in the self.all_events dict.
[ "Gather", "all", "event", "IDs", "in", "the", "REACH", "output", "by", "type", "." ]
python
train
tamasgal/km3pipe
km3pipe/dataclasses.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/dataclasses.py#L421-L435
def sorted(self, by, **kwargs): """Sort array by a column. Parameters ========== by: str Name of the columns to sort by(e.g. 'time'). """ sort_idc = np.argsort(self[by], **kwargs) return self.__class__( self[sort_idc], h5loc=self.h5loc, split_h5=self.split_h5, name=self.name )
[ "def", "sorted", "(", "self", ",", "by", ",", "*", "*", "kwargs", ")", ":", "sort_idc", "=", "np", ".", "argsort", "(", "self", "[", "by", "]", ",", "*", "*", "kwargs", ")", "return", "self", ".", "__class__", "(", "self", "[", "sort_idc", "]", ",", "h5loc", "=", "self", ".", "h5loc", ",", "split_h5", "=", "self", ".", "split_h5", ",", "name", "=", "self", ".", "name", ")" ]
Sort array by a column. Parameters ========== by: str Name of the column to sort by (e.g. 'time').
[ "Sort", "array", "by", "a", "column", "." ]
python
train
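A usage sketch; hits stands for any instance of the table class above:
hits_by_time = hits.sorted(by='time')               # returns a new, sorted instance
stable = hits.sorted(by='time', kind='mergesort')   # numpy.argsort kwargs pass through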
mar10/pyftpsync
ftpsync/targets.py
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L201-L208
def check_write(self, name): """Raise exception if writing cur_dir/name is not allowed.""" assert compat.is_native(name) if self.readonly and name not in ( DirMetadata.META_FILE_NAME, DirMetadata.LOCK_FILE_NAME, ): raise RuntimeError("Target is read-only: {} + {} / ".format(self, name))
[ "def", "check_write", "(", "self", ",", "name", ")", ":", "assert", "compat", ".", "is_native", "(", "name", ")", "if", "self", ".", "readonly", "and", "name", "not", "in", "(", "DirMetadata", ".", "META_FILE_NAME", ",", "DirMetadata", ".", "LOCK_FILE_NAME", ",", ")", ":", "raise", "RuntimeError", "(", "\"Target is read-only: {} + {} / \"", ".", "format", "(", "self", ",", "name", ")", ")" ]
Raise exception if writing cur_dir/name is not allowed.
[ "Raise", "exception", "if", "writing", "cur_dir", "/", "name", "is", "not", "allowed", "." ]
python
train
onecodex/onecodex
onecodex/distance.py
https://github.com/onecodex/onecodex/blob/326a0a1af140e3a57ccf31c3c9c5e17a5775c13d/onecodex/distance.py#L44-L73
def beta_diversity(self, metric="braycurtis", rank="auto"): """Calculate the diversity between two communities. Parameters ---------- metric : {'jaccard', 'braycurtis', 'cityblock'} The distance metric to calculate. rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. Returns ------- skbio.stats.distance.DistanceMatrix, a distance matrix. """ if metric not in ("jaccard", "braycurtis", "cityblock"): raise OneCodexException( "For beta diversity, metric must be one of: jaccard, braycurtis, cityblock" ) # needs read counts, not relative abundances if self._guess_normalized(): raise OneCodexException("Beta diversity requires unnormalized read counts.") df = self.to_df(rank=rank, normalize=False) counts = [] for c_id in df.index: counts.append(df.loc[c_id].tolist()) return skbio.diversity.beta_diversity(metric, counts, df.index.tolist())
[ "def", "beta_diversity", "(", "self", ",", "metric", "=", "\"braycurtis\"", ",", "rank", "=", "\"auto\"", ")", ":", "if", "metric", "not", "in", "(", "\"jaccard\"", ",", "\"braycurtis\"", ",", "\"cityblock\"", ")", ":", "raise", "OneCodexException", "(", "\"For beta diversity, metric must be one of: jaccard, braycurtis, cityblock\"", ")", "# needs read counts, not relative abundances", "if", "self", ".", "_guess_normalized", "(", ")", ":", "raise", "OneCodexException", "(", "\"Beta diversity requires unnormalized read counts.\"", ")", "df", "=", "self", ".", "to_df", "(", "rank", "=", "rank", ",", "normalize", "=", "False", ")", "counts", "=", "[", "]", "for", "c_id", "in", "df", ".", "index", ":", "counts", ".", "append", "(", "df", ".", "loc", "[", "c_id", "]", ".", "tolist", "(", ")", ")", "return", "skbio", ".", "diversity", ".", "beta_diversity", "(", "metric", ",", "counts", ",", "df", ".", "index", ".", "tolist", "(", ")", ")" ]
Calculate the diversity between two communities. Parameters ---------- metric : {'jaccard', 'braycurtis', 'cityblock'} The distance metric to calculate. rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. Returns ------- skbio.stats.distance.DistanceMatrix, a distance matrix.
[ "Calculate", "the", "diversity", "between", "two", "communities", "." ]
python
train
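A usage sketch assuming samples is an analysis collection exposing the mixin method above:
# Bray-Curtis distances at species level; the result is a scikit-bio
# DistanceMatrix keyed by the collection's classification IDs.
dm = samples.beta_diversity(metric='braycurtis', rank='species')
print(dm.ids)
print(dm.condensed_form())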
humilis/humilis-lambdautils
lambdautils/utils.py
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/utils.py#L136-L161
def annotate_event(ev, key, ts=None, namespace=None, **kwargs): """Add an annotation to an event.""" ann = {} if ts is None: ts = time.time() ann["ts"] = ts ann["key"] = key if namespace is None and "HUMILIS_ENVIRONMENT" in os.environ: namespace = "{}:{}:{}".format( os.environ.get("HUMILIS_ENVIRONMENT"), os.environ.get("HUMILIS_LAYER"), os.environ.get("HUMILIS_STAGE")) if namespace is not None: ann["namespace"] = namespace ann.update(kwargs) _humilis = ev.get("_humilis", {}) if not _humilis: ev["_humilis"] = {"annotation": [ann]} else: ev["_humilis"]["annotation"] = _humilis.get("annotation", []) # Clean up previous annotations with the same key delete_annotations(ev, key) ev["_humilis"]["annotation"].append(ann) return ev
[ "def", "annotate_event", "(", "ev", ",", "key", ",", "ts", "=", "None", ",", "namespace", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ann", "=", "{", "}", "if", "ts", "is", "None", ":", "ts", "=", "time", ".", "time", "(", ")", "ann", "[", "\"ts\"", "]", "=", "ts", "ann", "[", "\"key\"", "]", "=", "key", "if", "namespace", "is", "None", "and", "\"HUMILIS_ENVIRONMENT\"", "in", "os", ".", "environ", ":", "namespace", "=", "\"{}:{}:{}\"", ".", "format", "(", "os", ".", "environ", ".", "get", "(", "\"HUMILIS_ENVIRONMENT\"", ")", ",", "os", ".", "environ", ".", "get", "(", "\"HUMILIS_LAYER\"", ")", ",", "os", ".", "environ", ".", "get", "(", "\"HUMILIS_STAGE\"", ")", ")", "if", "namespace", "is", "not", "None", ":", "ann", "[", "\"namespace\"", "]", "=", "namespace", "ann", ".", "update", "(", "kwargs", ")", "_humilis", "=", "ev", ".", "get", "(", "\"_humilis\"", ",", "{", "}", ")", "if", "not", "_humilis", ":", "ev", "[", "\"_humilis\"", "]", "=", "{", "\"annotation\"", ":", "[", "ann", "]", "}", "else", ":", "ev", "[", "\"_humilis\"", "]", "[", "\"annotation\"", "]", "=", "_humilis", ".", "get", "(", "\"annotation\"", ",", "[", "]", ")", "# Clean up previous annotations with the same key", "delete_annotations", "(", "ev", ",", "key", ")", "ev", "[", "\"_humilis\"", "]", "[", "\"annotation\"", "]", ".", "append", "(", "ann", ")", "return", "ev" ]
Add an annotation to an event.
[ "Add", "an", "annotation", "to", "an", "event", "." ]
python
train
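annotate_event() is self-contained enough for a worked example; the event contents are hypothetical and the HUMILIS_* environment variables are assumed to be unset, so no namespace is attached:
ev = {"id": "123", "payload": {}}
annotate_event(ev, "validated", ts=1500000000, status="ok")

# ev["_humilis"]["annotation"] now holds a single entry:
#   {"ts": 1500000000, "key": "validated", "status": "ok"}
# Annotating again with key "validated" replaces that entry rather than
# appending a duplicate, because old annotations with the same key are deleted first.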
solocompt/plugs-mail
plugs_mail/management/commands/load_email_templates.py
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/management/commands/load_email_templates.py#L171-L180
def open_file(self, file_): """ Receives a file path has input and returns a string with the contents of the file """ with open(file_, 'r', encoding='utf-8') as file: text = '' for line in file: text += line return text
[ "def", "open_file", "(", "self", ",", "file_", ")", ":", "with", "open", "(", "file_", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "file", ":", "text", "=", "''", "for", "line", "in", "file", ":", "text", "+=", "line", "return", "text" ]
Receives a file path has input and returns a string with the contents of the file
[ "Receives", "a", "file", "path", "has", "input", "and", "returns", "a", "string", "with", "the", "contents", "of", "the", "file" ]
python
train
gregarmer/trunserver
trunserv/autoreload.py
https://github.com/gregarmer/trunserver/blob/80906fa5331f4a35260f35c0082cf4dd299543ea/trunserv/autoreload.py#L71-L82
def reloader_thread(softexit=False): """If ``soft_exit`` is True, we use sys.exit(); otherwise ``os_exit`` will be used to end the process. """ while RUN_RELOADER: if code_changed(): # force reload if softexit: sys.exit(3) else: os._exit(3) time.sleep(1)
[ "def", "reloader_thread", "(", "softexit", "=", "False", ")", ":", "while", "RUN_RELOADER", ":", "if", "code_changed", "(", ")", ":", "# force reload", "if", "softexit", ":", "sys", ".", "exit", "(", "3", ")", "else", ":", "os", ".", "_exit", "(", "3", ")", "time", ".", "sleep", "(", "1", ")" ]
If ``soft_exit`` is True, we use sys.exit(); otherwise ``os_exit`` will be used to end the process.
[ "If", "soft_exit", "is", "True", "we", "use", "sys", ".", "exit", "()", ";", "otherwise", "os_exit", "will", "be", "used", "to", "end", "the", "process", "." ]
python
train
DataKitchen/DKCloudCommand
DKCloudCommand/modules/DKCloudAPI.py
https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/modules/DKCloudAPI.py#L904-L935
def recipe_tree(self, kitchen, recipe): """ gets the status of a recipe :param self: DKCloudAPI :param kitchen: string :param recipe: string :rtype: dict """ rc = DKReturnCode() if kitchen is None or isinstance(kitchen, basestring) is False: rc.set(rc.DK_FAIL, 'issue with kitchen parameter') return rc if recipe is None or isinstance(recipe, basestring) is False: rc.set(rc.DK_FAIL, 'issue with recipe parameter') return rc url = '%s/v2/recipe/tree/%s/%s' % (self.get_url_for_direct_rest_call(), kitchen, recipe) try: response = requests.get(url, headers=self._get_common_headers()) rdict = self._get_json(response) pass except (RequestException, ValueError, TypeError), c: s = "recipe_tree: exception: %s" % str(c) rc.set(rc.DK_FAIL, s) return rc if DKCloudAPI._valid_response(response): remote_sha = rdict['recipes'][recipe] rc.set(rc.DK_SUCCESS, None, remote_sha) else: arc = DKAPIReturnCode(rdict, response) rc.set(rc.DK_FAIL, arc.get_message()) return rc
[ "def", "recipe_tree", "(", "self", ",", "kitchen", ",", "recipe", ")", ":", "rc", "=", "DKReturnCode", "(", ")", "if", "kitchen", "is", "None", "or", "isinstance", "(", "kitchen", ",", "basestring", ")", "is", "False", ":", "rc", ".", "set", "(", "rc", ".", "DK_FAIL", ",", "'issue with kitchen parameter'", ")", "return", "rc", "if", "recipe", "is", "None", "or", "isinstance", "(", "recipe", ",", "basestring", ")", "is", "False", ":", "rc", ".", "set", "(", "rc", ".", "DK_FAIL", ",", "'issue with recipe parameter'", ")", "return", "rc", "url", "=", "'%s/v2/recipe/tree/%s/%s'", "%", "(", "self", ".", "get_url_for_direct_rest_call", "(", ")", ",", "kitchen", ",", "recipe", ")", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "_get_common_headers", "(", ")", ")", "rdict", "=", "self", ".", "_get_json", "(", "response", ")", "pass", "except", "(", "RequestException", ",", "ValueError", ",", "TypeError", ")", ",", "c", ":", "s", "=", "\"recipe_tree: exception: %s\"", "%", "str", "(", "c", ")", "rc", ".", "set", "(", "rc", ".", "DK_FAIL", ",", "s", ")", "return", "rc", "if", "DKCloudAPI", ".", "_valid_response", "(", "response", ")", ":", "remote_sha", "=", "rdict", "[", "'recipes'", "]", "[", "recipe", "]", "rc", ".", "set", "(", "rc", ".", "DK_SUCCESS", ",", "None", ",", "remote_sha", ")", "else", ":", "arc", "=", "DKAPIReturnCode", "(", "rdict", ",", "response", ")", "rc", ".", "set", "(", "rc", ".", "DK_FAIL", ",", "arc", ".", "get_message", "(", ")", ")", "return", "rc" ]
gets the status of a recipe :param self: DKCloudAPI :param kitchen: string :param recipe: string :rtype: dict
[ "gets", "the", "status", "of", "a", "recipe", ":", "param", "self", ":", "DKCloudAPI", ":", "param", "kitchen", ":", "string", ":", "param", "recipe", ":", "string", ":", "rtype", ":", "dict" ]
python
train
JohnVinyard/zounds
zounds/spectral/frequencyscale.py
https://github.com/JohnVinyard/zounds/blob/337b3f98753d09eaab1c72dcd37bb852a3fa5ac6/zounds/spectral/frequencyscale.py#L241-L247
def Q(self): """ The quality factor of the scale, or, the ratio of center frequencies to bandwidths """ return np.array(list(self.center_frequencies)) \ / np.array(list(self.bandwidths))
[ "def", "Q", "(", "self", ")", ":", "return", "np", ".", "array", "(", "list", "(", "self", ".", "center_frequencies", ")", ")", "/", "np", ".", "array", "(", "list", "(", "self", ".", "bandwidths", ")", ")" ]
The quality factor of the scale, or, the ratio of center frequencies to bandwidths
[ "The", "quality", "factor", "of", "the", "scale", "or", "the", "ratio", "of", "center", "frequencies", "to", "bandwidths" ]
python
train
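The Q property above is an element-wise ratio; a minimal standalone sketch of the same computation, using invented frequencies rather than a real zounds frequency scale, could look like this:

import numpy as np

center_frequencies = np.array([110.0, 220.0, 440.0, 880.0])   # Hz, invented values
bandwidths = np.array([10.0, 20.0, 40.0, 80.0])               # Hz, invented values

Q = center_frequencies / bandwidths
print(Q)   # -> [11. 11. 11. 11.], i.e. a constant-Q spacing in this toy case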
Azure/azure-cosmos-table-python
azure-cosmosdb-table/azure/cosmosdb/table/_encryption.py
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/_encryption.py#L163-L212
def _decrypt_entity(entity, encrypted_properties_list, content_encryption_key, entityIV, isJavaV1): ''' Decrypts the specified entity using AES256 in CBC mode with 128 bit padding. Unwraps the CEK using either the specified KEK or the key returned by the key_resolver. Properties specified in the encrypted_properties_list, will be decrypted and decoded to utf-8 strings. :param entity: The entity being retrieved and decrypted. Could be a dict or an entity object. :param list encrypted_properties_list: The encrypted list of all the properties that are encrypted. :param bytes[] content_encryption_key: The key used internally to encrypt the entity. Extrated from the entity metadata. :param bytes[] entityIV: The intialization vector used to seed the encryption algorithm. Extracted from the entity metadata. :return: The decrypted entity :rtype: Entity ''' _validate_not_none('entity', entity) decrypted_entity = deepcopy(entity) try: for property in entity.keys(): if property in encrypted_properties_list: value = entity[property] propertyIV = _generate_property_iv(entityIV, entity['PartitionKey'], entity['RowKey'], property, isJavaV1) cipher = _generate_AES_CBC_cipher(content_encryption_key, propertyIV) # Decrypt the property. decryptor = cipher.decryptor() decrypted_data = (decryptor.update(value.value) + decryptor.finalize()) # Unpad the data. unpadder = PKCS7(128).unpadder() decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) decrypted_data = decrypted_data.decode('utf-8') decrypted_entity[property] = decrypted_data decrypted_entity.pop('_ClientEncryptionMetadata1') decrypted_entity.pop('_ClientEncryptionMetadata2') return decrypted_entity except: raise AzureException(_ERROR_DECRYPTION_FAILURE)
[ "def", "_decrypt_entity", "(", "entity", ",", "encrypted_properties_list", ",", "content_encryption_key", ",", "entityIV", ",", "isJavaV1", ")", ":", "_validate_not_none", "(", "'entity'", ",", "entity", ")", "decrypted_entity", "=", "deepcopy", "(", "entity", ")", "try", ":", "for", "property", "in", "entity", ".", "keys", "(", ")", ":", "if", "property", "in", "encrypted_properties_list", ":", "value", "=", "entity", "[", "property", "]", "propertyIV", "=", "_generate_property_iv", "(", "entityIV", ",", "entity", "[", "'PartitionKey'", "]", ",", "entity", "[", "'RowKey'", "]", ",", "property", ",", "isJavaV1", ")", "cipher", "=", "_generate_AES_CBC_cipher", "(", "content_encryption_key", ",", "propertyIV", ")", "# Decrypt the property.", "decryptor", "=", "cipher", ".", "decryptor", "(", ")", "decrypted_data", "=", "(", "decryptor", ".", "update", "(", "value", ".", "value", ")", "+", "decryptor", ".", "finalize", "(", ")", ")", "# Unpad the data.", "unpadder", "=", "PKCS7", "(", "128", ")", ".", "unpadder", "(", ")", "decrypted_data", "=", "(", "unpadder", ".", "update", "(", "decrypted_data", ")", "+", "unpadder", ".", "finalize", "(", ")", ")", "decrypted_data", "=", "decrypted_data", ".", "decode", "(", "'utf-8'", ")", "decrypted_entity", "[", "property", "]", "=", "decrypted_data", "decrypted_entity", ".", "pop", "(", "'_ClientEncryptionMetadata1'", ")", "decrypted_entity", ".", "pop", "(", "'_ClientEncryptionMetadata2'", ")", "return", "decrypted_entity", "except", ":", "raise", "AzureException", "(", "_ERROR_DECRYPTION_FAILURE", ")" ]
Decrypts the specified entity using AES256 in CBC mode with 128 bit padding. Unwraps the CEK using either the specified KEK or the key returned by the key_resolver. Properties specified in the encrypted_properties_list, will be decrypted and decoded to utf-8 strings. :param entity: The entity being retrieved and decrypted. Could be a dict or an entity object. :param list encrypted_properties_list: The encrypted list of all the properties that are encrypted. :param bytes[] content_encryption_key: The key used internally to encrypt the entity. Extrated from the entity metadata. :param bytes[] entityIV: The intialization vector used to seed the encryption algorithm. Extracted from the entity metadata. :return: The decrypted entity :rtype: Entity
[ "Decrypts", "the", "specified", "entity", "using", "AES256", "in", "CBC", "mode", "with", "128", "bit", "padding", ".", "Unwraps", "the", "CEK", "using", "either", "the", "specified", "KEK", "or", "the", "key", "returned", "by", "the", "key_resolver", ".", "Properties", "specified", "in", "the", "encrypted_properties_list", "will", "be", "decrypted", "and", "decoded", "to", "utf", "-", "8", "strings", "." ]
python
train
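As a self-contained illustration of the primitives _decrypt_entity combines (AES-256 in CBC mode plus 128-bit PKCS7 unpadding), the sketch below round-trips one value with the cryptography package; the key and IV are random stand-ins, not the entity metadata fields the real helper derives them from.

import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.padding import PKCS7

key = os.urandom(32)   # stand-in for the content encryption key
iv = os.urandom(16)    # stand-in for the per-property IV

padder = PKCS7(128).padder()
padded = padder.update(u'secret value'.encode('utf-8')) + padder.finalize()
encryptor = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()).encryptor()
ciphertext = encryptor.update(padded) + encryptor.finalize()

decryptor = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()).decryptor()
unpadder = PKCS7(128).unpadder()
plaintext = unpadder.update(decryptor.update(ciphertext) + decryptor.finalize()) + unpadder.finalize()
assert plaintext.decode('utf-8') == u'secret value'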
welbornprod/colr
colr/progress.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/progress.py#L754-L760
def update(self, percent=None, text=None): """ Update the progress bar percentage and message. """ if percent is not None: self.percent = percent if text is not None: self.message = text super().update()
[ "def", "update", "(", "self", ",", "percent", "=", "None", ",", "text", "=", "None", ")", ":", "if", "percent", "is", "not", "None", ":", "self", ".", "percent", "=", "percent", "if", "text", "is", "not", "None", ":", "self", ".", "message", "=", "text", "super", "(", ")", ".", "update", "(", ")" ]
Update the progress bar percentage and message.
[ "Update", "the", "progress", "bar", "percentage", "and", "message", "." ]
python
train
CZ-NIC/yangson
yangson/datatype.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/datatype.py#L182-L184
def _handle_properties(self, stmt: Statement, sctx: SchemaContext) -> None: """Handle type substatements.""" self._handle_restrictions(stmt, sctx)
[ "def", "_handle_properties", "(", "self", ",", "stmt", ":", "Statement", ",", "sctx", ":", "SchemaContext", ")", "->", "None", ":", "self", ".", "_handle_restrictions", "(", "stmt", ",", "sctx", ")" ]
Handle type substatements.
[ "Handle", "type", "substatements", "." ]
python
train
sbg/sevenbridges-python
sevenbridges/models/automation.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/automation.py#L104-L122
def query(cls, automation=None, offset=None, limit=None, api=None): """ Query (List) apps. :param automation: Automation id. :param offset: Pagination offset. :param limit: Pagination limit. :param api: Api instance. :return: collection object """ automation_id = Transform.to_automation(automation) api = api or cls._API return super(AutomationMember, cls)._query( url=cls._URL['query'].format(automation_id=automation_id), automation_id=automation_id, offset=offset, limit=limit, api=api, )
[ "def", "query", "(", "cls", ",", "automation", "=", "None", ",", "offset", "=", "None", ",", "limit", "=", "None", ",", "api", "=", "None", ")", ":", "automation_id", "=", "Transform", ".", "to_automation", "(", "automation", ")", "api", "=", "api", "or", "cls", ".", "_API", "return", "super", "(", "AutomationMember", ",", "cls", ")", ".", "_query", "(", "url", "=", "cls", ".", "_URL", "[", "'query'", "]", ".", "format", "(", "automation_id", "=", "automation_id", ")", ",", "automation_id", "=", "automation_id", ",", "offset", "=", "offset", ",", "limit", "=", "limit", ",", "api", "=", "api", ",", ")" ]
Query (List) apps. :param automation: Automation id. :param offset: Pagination offset. :param limit: Pagination limit. :param api: Api instance. :return: collection object
[ "Query", "(", "List", ")", "apps", ".", ":", "param", "automation", ":", "Automation", "id", ".", ":", "param", "offset", ":", "Pagination", "offset", ".", ":", "param", "limit", ":", "Pagination", "limit", ".", ":", "param", "api", ":", "Api", "instance", ".", ":", "return", ":", "collection", "object" ]
python
train
quodlibet/mutagen
mutagen/_tools/mid3v2.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_tools/mid3v2.py#L115-L126
def frame_from_fsnative(arg): """Takes item from argv and returns ascii native str or raises ValueError. """ assert isinstance(arg, fsnative) text = fsn2text(arg, strict=True) if PY2: return text.encode("ascii") else: return text.encode("ascii").decode("ascii")
[ "def", "frame_from_fsnative", "(", "arg", ")", ":", "assert", "isinstance", "(", "arg", ",", "fsnative", ")", "text", "=", "fsn2text", "(", "arg", ",", "strict", "=", "True", ")", "if", "PY2", ":", "return", "text", ".", "encode", "(", "\"ascii\"", ")", "else", ":", "return", "text", ".", "encode", "(", "\"ascii\"", ")", ".", "decode", "(", "\"ascii\"", ")" ]
Takes item from argv and returns ascii native str or raises ValueError.
[ "Takes", "item", "from", "argv", "and", "returns", "ascii", "native", "str", "or", "raises", "ValueError", "." ]
python
train
bachya/regenmaschine
regenmaschine/watering.py
https://github.com/bachya/regenmaschine/blob/99afb648fe454dc4a7d5db85a02a8b3b5d26f8bc/regenmaschine/watering.py#L13-L28
async def log( self, date: datetime.date = None, days: int = None, details: bool = False) -> list: """Get watering information for X days from Y date.""" endpoint = 'watering/log' if details: endpoint += '/details' if date and days: endpoint = '{0}/{1}/{2}'.format( endpoint, date.strftime('%Y-%m-%d'), days) data = await self._request('get', endpoint) return data['waterLog']['days']
[ "async", "def", "log", "(", "self", ",", "date", ":", "datetime", ".", "date", "=", "None", ",", "days", ":", "int", "=", "None", ",", "details", ":", "bool", "=", "False", ")", "->", "list", ":", "endpoint", "=", "'watering/log'", "if", "details", ":", "endpoint", "+=", "'/details'", "if", "date", "and", "days", ":", "endpoint", "=", "'{0}/{1}/{2}'", ".", "format", "(", "endpoint", ",", "date", ".", "strftime", "(", "'%Y-%m-%d'", ")", ",", "days", ")", "data", "=", "await", "self", ".", "_request", "(", "'get'", ",", "endpoint", ")", "return", "data", "[", "'waterLog'", "]", "[", "'days'", "]" ]
Get watering information for X days from Y date.
[ "Get", "watering", "information", "for", "X", "days", "from", "Y", "date", "." ]
python
train
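A small sketch of just the endpoint construction performed by the coroutine above, without any HTTP; the date and day count are invented.

import datetime

def build_log_endpoint(date=None, days=None, details=False):
    endpoint = 'watering/log'
    if details:
        endpoint += '/details'
    if date and days:
        endpoint = '{0}/{1}/{2}'.format(endpoint, date.strftime('%Y-%m-%d'), days)
    return endpoint

print(build_log_endpoint(datetime.date(2019, 6, 1), 3, details=True))
# -> watering/log/details/2019-06-01/3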
DemocracyClub/uk-election-ids
uk_election_ids/election_ids.py
https://github.com/DemocracyClub/uk-election-ids/blob/566895e15b539e8a7fa3bebb680d5cd326cf6b6b/uk_election_ids/election_ids.py#L204-L215
def subtype_group_id(self): """ str: Subtype Group ID """ self._validate() self._validate_for_subtype_group_id() parts = [] parts.append(self.election_type) parts.append(self.subtype) parts.append(self.date) return ".".join(parts)
[ "def", "subtype_group_id", "(", "self", ")", ":", "self", ".", "_validate", "(", ")", "self", ".", "_validate_for_subtype_group_id", "(", ")", "parts", "=", "[", "]", "parts", ".", "append", "(", "self", ".", "election_type", ")", "parts", ".", "append", "(", "self", ".", "subtype", ")", "parts", ".", "append", "(", "self", ".", "date", ")", "return", "\".\"", ".", "join", "(", "parts", ")" ]
str: Subtype Group ID
[ "str", ":", "Subtype", "Group", "ID" ]
python
train
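A sketch of the identifier layout only, with the validation steps omitted; the election type, subtype and poll date here are made-up values in the general style such IDs use.

parts = ['sp', 'c', '2021-05-06']       # election type, subtype, poll date (illustrative)
subtype_group_id = '.'.join(parts)
print(subtype_group_id)                 # -> sp.c.2021-05-06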
thunder-project/thunder
thunder/series/series.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L807-L835
def _makewindows(self, indices, window): """ Make masks used by windowing functions Given a list of indices specifying window centers, and a window size, construct a list of index arrays, one per window, that index into the target array Parameters ---------- indices : array-like List of times specifying window centers window : int Window size """ div = divmod(window, 2) before = div[0] after = div[0] + div[1] index = asarray(self.index) indices = asarray(indices) if where(index == max(indices))[0][0] + after > len(index): raise ValueError("Maximum requested index %g, with window %g, exceeds length %g" % (max(indices), window, len(index))) if where(index == min(indices))[0][0] - before < 0: raise ValueError("Minimum requested index %g, with window %g, is less than 0" % (min(indices), window)) masks = [arange(where(index == i)[0][0]-before, where(index == i)[0][0]+after, dtype='int') for i in indices] return masks
[ "def", "_makewindows", "(", "self", ",", "indices", ",", "window", ")", ":", "div", "=", "divmod", "(", "window", ",", "2", ")", "before", "=", "div", "[", "0", "]", "after", "=", "div", "[", "0", "]", "+", "div", "[", "1", "]", "index", "=", "asarray", "(", "self", ".", "index", ")", "indices", "=", "asarray", "(", "indices", ")", "if", "where", "(", "index", "==", "max", "(", "indices", ")", ")", "[", "0", "]", "[", "0", "]", "+", "after", ">", "len", "(", "index", ")", ":", "raise", "ValueError", "(", "\"Maximum requested index %g, with window %g, exceeds length %g\"", "%", "(", "max", "(", "indices", ")", ",", "window", ",", "len", "(", "index", ")", ")", ")", "if", "where", "(", "index", "==", "min", "(", "indices", ")", ")", "[", "0", "]", "[", "0", "]", "-", "before", "<", "0", ":", "raise", "ValueError", "(", "\"Minimum requested index %g, with window %g, is less than 0\"", "%", "(", "min", "(", "indices", ")", ",", "window", ")", ")", "masks", "=", "[", "arange", "(", "where", "(", "index", "==", "i", ")", "[", "0", "]", "[", "0", "]", "-", "before", ",", "where", "(", "index", "==", "i", ")", "[", "0", "]", "[", "0", "]", "+", "after", ",", "dtype", "=", "'int'", ")", "for", "i", "in", "indices", "]", "return", "masks" ]
Make masks used by windowing functions Given a list of indices specifying window centers, and a window size, construct a list of index arrays, one per window, that index into the target array Parameters ---------- indices : array-like List of times specifying window centers window : int Window size
[ "Make", "masks", "used", "by", "windowing", "functions" ]
python
train
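An illustrative standalone sketch of the window-mask construction, with a plain integer range standing in for the Series index and hand-picked centers:

from numpy import arange, where

index = arange(20)              # pretend time index of a series
centers = [5, 10, 15]           # requested window centers
window = 4
before, extra = divmod(window, 2)
after = before + extra

masks = [arange(where(index == c)[0][0] - before,
                where(index == c)[0][0] + after, dtype='int')
         for c in centers]
print(masks)   # windows covering indices 3..6, 8..11 and 13..16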
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3144-L3195
def batch_dense(inputs, units, activation=None, kernel_initializer=None, reuse=None, name=None): """Multiply a batch of input matrices by a batch of parameter matrices. Each input matrix is multiplied by the corresponding parameter matrix. This is useful in a mixture-of-experts where the batch represents different experts with different inputs. Args: inputs: a Tensor with shape [batch, length, input_units] units: an integer activation: an optional activation function to apply to the output kernel_initializer: an optional initializer reuse: whether to reuse the varaible scope name: an optional string Returns: a Tensor with shape [batch, length, units] Raises: ValueError: if the "batch" or "input_units" dimensions of inputs are not statically known. """ inputs_shape = shape_list(inputs) if len(inputs_shape) != 3: raise ValueError("inputs must have 3 dimensions") batch = inputs_shape[0] input_units = inputs_shape[2] if not isinstance(batch, int) or not isinstance(input_units, int): raise ValueError("inputs must have static dimensions 0 and 2") with tf.variable_scope( name, default_name="batch_dense", values=[inputs], reuse=reuse, dtype=inputs.dtype): if kernel_initializer is None: kernel_initializer = tf.random_normal_initializer( stddev=input_units**-0.5) w = tf.get_variable( "w", [batch, input_units, units], initializer=kernel_initializer, dtype=inputs.dtype) y = tf.matmul(inputs, w) if activation is not None: y = activation(y) return y
[ "def", "batch_dense", "(", "inputs", ",", "units", ",", "activation", "=", "None", ",", "kernel_initializer", "=", "None", ",", "reuse", "=", "None", ",", "name", "=", "None", ")", ":", "inputs_shape", "=", "shape_list", "(", "inputs", ")", "if", "len", "(", "inputs_shape", ")", "!=", "3", ":", "raise", "ValueError", "(", "\"inputs must have 3 dimensions\"", ")", "batch", "=", "inputs_shape", "[", "0", "]", "input_units", "=", "inputs_shape", "[", "2", "]", "if", "not", "isinstance", "(", "batch", ",", "int", ")", "or", "not", "isinstance", "(", "input_units", ",", "int", ")", ":", "raise", "ValueError", "(", "\"inputs must have static dimensions 0 and 2\"", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"batch_dense\"", ",", "values", "=", "[", "inputs", "]", ",", "reuse", "=", "reuse", ",", "dtype", "=", "inputs", ".", "dtype", ")", ":", "if", "kernel_initializer", "is", "None", ":", "kernel_initializer", "=", "tf", ".", "random_normal_initializer", "(", "stddev", "=", "input_units", "**", "-", "0.5", ")", "w", "=", "tf", ".", "get_variable", "(", "\"w\"", ",", "[", "batch", ",", "input_units", ",", "units", "]", ",", "initializer", "=", "kernel_initializer", ",", "dtype", "=", "inputs", ".", "dtype", ")", "y", "=", "tf", ".", "matmul", "(", "inputs", ",", "w", ")", "if", "activation", "is", "not", "None", ":", "y", "=", "activation", "(", "y", ")", "return", "y" ]
Multiply a batch of input matrices by a batch of parameter matrices. Each input matrix is multiplied by the corresponding parameter matrix. This is useful in a mixture-of-experts where the batch represents different experts with different inputs. Args: inputs: a Tensor with shape [batch, length, input_units] units: an integer activation: an optional activation function to apply to the output kernel_initializer: an optional initializer reuse: whether to reuse the varaible scope name: an optional string Returns: a Tensor with shape [batch, length, units] Raises: ValueError: if the "batch" or "input_units" dimensions of inputs are not statically known.
[ "Multiply", "a", "batch", "of", "input", "matrices", "by", "a", "batch", "of", "parameter", "matrices", "." ]
python
train
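A NumPy sketch of what batch_dense computes (a per-example weight matrix rather than one shared dense layer); the shapes are invented and no TensorFlow graph is involved.

import numpy as np

batch, length, input_units, units = 3, 7, 4, 5
x = np.random.randn(batch, length, input_units)
w = np.random.randn(batch, input_units, units) * input_units ** -0.5   # one matrix per batch element

y = np.matmul(x, w)     # [batch, length, units]; weights are not shared across the batch
print(y.shape)          # (3, 7, 5)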
O365/python-o365
O365/connection.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/connection.py#L509-L536
def refresh_token(self): """ Refresh the OAuth authorization token. This will be called automatically when the access token expires, however, you can manually call this method to request a new refresh token. :return bool: Success / Failure """ if self.session is None: self.session = self.get_session() token = self.token_backend.token if token and token.is_long_lived: client_id, client_secret = self.auth token = Token(self.session.refresh_token( self._oauth2_token_url, client_id=client_id, client_secret=client_secret)) else: log.error('You can not refresh an access token that has no "refreh_token" available.' 'Include "offline_access" scope when authentication to get a "refresh_token"') return False self.token_backend.token = token if self.store_token: self.token_backend.save_token() return True
[ "def", "refresh_token", "(", "self", ")", ":", "if", "self", ".", "session", "is", "None", ":", "self", ".", "session", "=", "self", ".", "get_session", "(", ")", "token", "=", "self", ".", "token_backend", ".", "token", "if", "token", "and", "token", ".", "is_long_lived", ":", "client_id", ",", "client_secret", "=", "self", ".", "auth", "token", "=", "Token", "(", "self", ".", "session", ".", "refresh_token", "(", "self", ".", "_oauth2_token_url", ",", "client_id", "=", "client_id", ",", "client_secret", "=", "client_secret", ")", ")", "else", ":", "log", ".", "error", "(", "'You can not refresh an access token that has no \"refreh_token\" available.'", "'Include \"offline_access\" scope when authentication to get a \"refresh_token\"'", ")", "return", "False", "self", ".", "token_backend", ".", "token", "=", "token", "if", "self", ".", "store_token", ":", "self", ".", "token_backend", ".", "save_token", "(", ")", "return", "True" ]
Refresh the OAuth authorization token. This will be called automatically when the access token expires, however, you can manually call this method to request a new refresh token. :return bool: Success / Failure
[ "Refresh", "the", "OAuth", "authorization", "token", ".", "This", "will", "be", "called", "automatically", "when", "the", "access", "token", "expires", "however", "you", "can", "manually", "call", "this", "method", "to", "request", "a", "new", "refresh", "token", ".", ":", "return", "bool", ":", "Success", "/", "Failure" ]
python
train
ruipgil/TrackToTrip
tracktotrip/segment.py
https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/segment.py#L20-L39
def remove_liers(points): """ Removes obvious noise points Checks time consistency, removing points that appear out of order Args: points (:obj:`list` of :obj:`Point`) Returns: :obj:`list` of :obj:`Point` """ result = [points[0]] for i in range(1, len(points) - 2): prv = points[i-1] crr = points[i] nxt = points[i+1] if prv.time <= crr.time and crr.time <= nxt.time: result.append(crr) result.append(points[-1]) return result
[ "def", "remove_liers", "(", "points", ")", ":", "result", "=", "[", "points", "[", "0", "]", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "points", ")", "-", "2", ")", ":", "prv", "=", "points", "[", "i", "-", "1", "]", "crr", "=", "points", "[", "i", "]", "nxt", "=", "points", "[", "i", "+", "1", "]", "if", "prv", ".", "time", "<=", "crr", ".", "time", "and", "crr", ".", "time", "<=", "nxt", ".", "time", ":", "result", ".", "append", "(", "crr", ")", "result", ".", "append", "(", "points", "[", "-", "1", "]", ")", "return", "result" ]
Removes obvious noise points Checks time consistency, removing points that appear out of order Args: points (:obj:`list` of :obj:`Point`) Returns: :obj:`list` of :obj:`Point`
[ "Removes", "obvious", "noise", "points" ]
python
train
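The same filtering logic works on any object with a time attribute; here is a self-contained sketch using a minimal stand-in Point (the real tracktotrip Point also carries coordinates):

from collections import namedtuple

Point = namedtuple('Point', 'time')

def remove_liers(points):
    result = [points[0]]
    for i in range(1, len(points) - 2):
        prv, crr, nxt = points[i - 1], points[i], points[i + 1]
        if prv.time <= crr.time <= nxt.time:
            result.append(crr)
    result.append(points[-1])
    return result

pts = [Point(t) for t in (0, 1, 2, 99, 3, 4, 5)]
print([p.time for p in remove_liers(pts)])   # -> [0, 1, 2, 5]; the out-of-order 99 and its neighbours are dropped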
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L4620-L4654
def _get_policy_dict(policy): '''Returns a dictionary representation of a policy''' profile_dict = {'name': policy.name, 'description': policy.description, 'resource_type': policy.resourceType.resourceType} subprofile_dicts = [] if isinstance(policy, pbm.profile.CapabilityBasedProfile) and \ isinstance(policy.constraints, pbm.profile.SubProfileCapabilityConstraints): for subprofile in policy.constraints.subProfiles: subprofile_dict = {'name': subprofile.name, 'force_provision': subprofile.forceProvision} cap_dicts = [] for cap in subprofile.capability: cap_dict = {'namespace': cap.id.namespace, 'id': cap.id.id} # We assume there is one constraint with one value set val = cap.constraint[0].propertyInstance[0].value if isinstance(val, pbm.capability.types.Range): val_dict = {'type': 'range', 'min': val.min, 'max': val.max} elif isinstance(val, pbm.capability.types.DiscreteSet): val_dict = {'type': 'set', 'values': val.values} else: val_dict = {'type': 'scalar', 'value': val} cap_dict['setting'] = val_dict cap_dicts.append(cap_dict) subprofile_dict['capabilities'] = cap_dicts subprofile_dicts.append(subprofile_dict) profile_dict['subprofiles'] = subprofile_dicts return profile_dict
[ "def", "_get_policy_dict", "(", "policy", ")", ":", "profile_dict", "=", "{", "'name'", ":", "policy", ".", "name", ",", "'description'", ":", "policy", ".", "description", ",", "'resource_type'", ":", "policy", ".", "resourceType", ".", "resourceType", "}", "subprofile_dicts", "=", "[", "]", "if", "isinstance", "(", "policy", ",", "pbm", ".", "profile", ".", "CapabilityBasedProfile", ")", "and", "isinstance", "(", "policy", ".", "constraints", ",", "pbm", ".", "profile", ".", "SubProfileCapabilityConstraints", ")", ":", "for", "subprofile", "in", "policy", ".", "constraints", ".", "subProfiles", ":", "subprofile_dict", "=", "{", "'name'", ":", "subprofile", ".", "name", ",", "'force_provision'", ":", "subprofile", ".", "forceProvision", "}", "cap_dicts", "=", "[", "]", "for", "cap", "in", "subprofile", ".", "capability", ":", "cap_dict", "=", "{", "'namespace'", ":", "cap", ".", "id", ".", "namespace", ",", "'id'", ":", "cap", ".", "id", ".", "id", "}", "# We assume there is one constraint with one value set", "val", "=", "cap", ".", "constraint", "[", "0", "]", ".", "propertyInstance", "[", "0", "]", ".", "value", "if", "isinstance", "(", "val", ",", "pbm", ".", "capability", ".", "types", ".", "Range", ")", ":", "val_dict", "=", "{", "'type'", ":", "'range'", ",", "'min'", ":", "val", ".", "min", ",", "'max'", ":", "val", ".", "max", "}", "elif", "isinstance", "(", "val", ",", "pbm", ".", "capability", ".", "types", ".", "DiscreteSet", ")", ":", "val_dict", "=", "{", "'type'", ":", "'set'", ",", "'values'", ":", "val", ".", "values", "}", "else", ":", "val_dict", "=", "{", "'type'", ":", "'scalar'", ",", "'value'", ":", "val", "}", "cap_dict", "[", "'setting'", "]", "=", "val_dict", "cap_dicts", ".", "append", "(", "cap_dict", ")", "subprofile_dict", "[", "'capabilities'", "]", "=", "cap_dicts", "subprofile_dicts", ".", "append", "(", "subprofile_dict", ")", "profile_dict", "[", "'subprofiles'", "]", "=", "subprofile_dicts", "return", "profile_dict" ]
Returns a dictionary representation of a policy
[ "Returns", "a", "dictionary", "representation", "of", "a", "policy" ]
python
train
xolox/python-vcs-repo-mgr
vcs_repo_mgr/__init__.py
https://github.com/xolox/python-vcs-repo-mgr/blob/fdad2441a3e7ba5deeeddfa1c2f5ebc00c393aed/vcs_repo_mgr/__init__.py#L1213-L1222
def ensure_exists(self): """ Make sure the local repository exists. :raises: :exc:`~exceptions.ValueError` when the local repository doesn't exist yet. """ if not self.exists: msg = "The local %s repository %s doesn't exist!" raise ValueError(msg % (self.friendly_name, format_path(self.local)))
[ "def", "ensure_exists", "(", "self", ")", ":", "if", "not", "self", ".", "exists", ":", "msg", "=", "\"The local %s repository %s doesn't exist!\"", "raise", "ValueError", "(", "msg", "%", "(", "self", ".", "friendly_name", ",", "format_path", "(", "self", ".", "local", ")", ")", ")" ]
Make sure the local repository exists. :raises: :exc:`~exceptions.ValueError` when the local repository doesn't exist yet.
[ "Make", "sure", "the", "local", "repository", "exists", "." ]
python
train
celery/django-celery
djcelery/loaders.py
https://github.com/celery/django-celery/blob/5d1ecb09c6304d22cc447c7c08fba0bd1febc2ef/djcelery/loaders.py#L181-L202
def find_related_module(app, related_name): """Given an application name and a module name, tries to find that module in the application.""" try: app_path = importlib.import_module(app).__path__ except ImportError as exc: warn('Autodiscover: Error importing %s.%s: %r' % ( app, related_name, exc, )) return except AttributeError: return try: f, _, _ = imp.find_module(related_name, app_path) # f is returned None when app_path is a module f and f.close() except ImportError: return return importlib.import_module('{0}.{1}'.format(app, related_name))
[ "def", "find_related_module", "(", "app", ",", "related_name", ")", ":", "try", ":", "app_path", "=", "importlib", ".", "import_module", "(", "app", ")", ".", "__path__", "except", "ImportError", "as", "exc", ":", "warn", "(", "'Autodiscover: Error importing %s.%s: %r'", "%", "(", "app", ",", "related_name", ",", "exc", ",", ")", ")", "return", "except", "AttributeError", ":", "return", "try", ":", "f", ",", "_", ",", "_", "=", "imp", ".", "find_module", "(", "related_name", ",", "app_path", ")", "# f is returned None when app_path is a module", "f", "and", "f", ".", "close", "(", ")", "except", "ImportError", ":", "return", "return", "importlib", ".", "import_module", "(", "'{0}.{1}'", ".", "format", "(", "app", ",", "related_name", ")", ")" ]
Given an application name and a module name, tries to find that module in the application.
[ "Given", "an", "application", "name", "and", "a", "module", "name", "tries", "to", "find", "that", "module", "in", "the", "application", "." ]
python
train
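A Python 3 sketch of the same lookup without the deprecated imp module; the example uses a standard-library package so it runs anywhere, whereas the real helper is pointed at Django app names.

import importlib
import importlib.util

def find_related_module(app, related_name):
    try:
        importlib.import_module(app)                          # the app itself must import
    except ImportError:
        return None
    full_name = '{0}.{1}'.format(app, related_name)
    if importlib.util.find_spec(full_name) is None:           # no such submodule
        return None
    return importlib.import_module(full_name)

print(find_related_module('json', 'decoder'))                 # <module 'json.decoder' ...>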
ucsb-cs-education/hairball
hairball/plugins/__init__.py
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/__init__.py#L189-L196
def _process(self, scratch, filename, **kwargs): """Internal hook that marks reachable scripts before calling analyze. Returns data exactly as returned by the analyze method. """ self.tag_reachable_scripts(scratch) return self.analyze(scratch, filename=filename, **kwargs)
[ "def", "_process", "(", "self", ",", "scratch", ",", "filename", ",", "*", "*", "kwargs", ")", ":", "self", ".", "tag_reachable_scripts", "(", "scratch", ")", "return", "self", ".", "analyze", "(", "scratch", ",", "filename", "=", "filename", ",", "*", "*", "kwargs", ")" ]
Internal hook that marks reachable scripts before calling analyze. Returns data exactly as returned by the analyze method.
[ "Internal", "hook", "that", "marks", "reachable", "scripts", "before", "calling", "analyze", "." ]
python
train
ssokolow/fastdupes
fastdupes.py
https://github.com/ssokolow/fastdupes/blob/0334545885445834307c075a445fba9fe6f0c9e7/fastdupes.py#L376-L417
def compareChunks(handles, chunk_size=CHUNK_SIZE): """Group a list of file handles based on equality of the next chunk of data read from them. :param handles: A list of open handles for file-like objects with otentially-identical contents. :param chunk_size: The amount of data to read from each handle every time this function is called. :returns: Two lists of lists: * Lists to be fed back into this function individually * Finished groups of duplicate paths. (including unique files as single-file lists) :rtype: ``(list, list)`` .. attention:: File handles will be closed when no longer needed .. todo:: Discard chunk contents immediately once they're no longer needed """ chunks = [(path, fh, fh.read(chunk_size)) for path, fh, _ in handles] more, done = [], [] # While there are combinations not yet tried... while chunks: # Compare the first chunk to all successive chunks matches, non_matches = [chunks[0]], [] for chunk in chunks[1:]: if matches[0][2] == chunk[2]: matches.append(chunk) else: non_matches.append(chunk) # Check for EOF or obviously unique files if len(matches) == 1 or matches[0][2] == "": for x in matches: x[1].close() done.append([x[0] for x in matches]) else: more.append(matches) chunks = non_matches return more, done
[ "def", "compareChunks", "(", "handles", ",", "chunk_size", "=", "CHUNK_SIZE", ")", ":", "chunks", "=", "[", "(", "path", ",", "fh", ",", "fh", ".", "read", "(", "chunk_size", ")", ")", "for", "path", ",", "fh", ",", "_", "in", "handles", "]", "more", ",", "done", "=", "[", "]", ",", "[", "]", "# While there are combinations not yet tried...", "while", "chunks", ":", "# Compare the first chunk to all successive chunks", "matches", ",", "non_matches", "=", "[", "chunks", "[", "0", "]", "]", ",", "[", "]", "for", "chunk", "in", "chunks", "[", "1", ":", "]", ":", "if", "matches", "[", "0", "]", "[", "2", "]", "==", "chunk", "[", "2", "]", ":", "matches", ".", "append", "(", "chunk", ")", "else", ":", "non_matches", ".", "append", "(", "chunk", ")", "# Check for EOF or obviously unique files", "if", "len", "(", "matches", ")", "==", "1", "or", "matches", "[", "0", "]", "[", "2", "]", "==", "\"\"", ":", "for", "x", "in", "matches", ":", "x", "[", "1", "]", ".", "close", "(", ")", "done", ".", "append", "(", "[", "x", "[", "0", "]", "for", "x", "in", "matches", "]", ")", "else", ":", "more", ".", "append", "(", "matches", ")", "chunks", "=", "non_matches", "return", "more", ",", "done" ]
Group a list of file handles based on equality of the next chunk of data read from them. :param handles: A list of open handles for file-like objects with otentially-identical contents. :param chunk_size: The amount of data to read from each handle every time this function is called. :returns: Two lists of lists: * Lists to be fed back into this function individually * Finished groups of duplicate paths. (including unique files as single-file lists) :rtype: ``(list, list)`` .. attention:: File handles will be closed when no longer needed .. todo:: Discard chunk contents immediately once they're no longer needed
[ "Group", "a", "list", "of", "file", "handles", "based", "on", "equality", "of", "the", "next", "chunk", "of", "data", "read", "from", "them", "." ]
python
valid
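An illustrative sketch of the chunk-equality grouping idea using in-memory streams; it is not the fastdupes API itself, it compares only one fixed-size chunk, and the paths and contents are invented.

import io

files = {'a.txt': b'hello world', 'b.txt': b'hello there', 'c.txt': b'goodbye'}
handles = [(path, io.BytesIO(data)) for path, data in files.items()]

groups = {}
for path, fh in handles:
    groups.setdefault(fh.read(4), []).append(path)   # group by the first 4 bytes

print(list(groups.values()))   # -> [['a.txt', 'b.txt'], ['c.txt']]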
mongodb/mongo-python-driver
pymongo/collection.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/collection.py#L602-L646
def _insert(self, docs, ordered=True, check_keys=True, manipulate=False, write_concern=None, op_id=None, bypass_doc_val=False, session=None): """Internal insert helper.""" if isinstance(docs, abc.Mapping): return self._insert_one( docs, ordered, check_keys, manipulate, write_concern, op_id, bypass_doc_val, session) ids = [] if manipulate: def gen(): """Generator that applies SON manipulators to each document and adds _id if necessary. """ _db = self.__database for doc in docs: # Apply user-configured SON manipulators. This order of # operations is required for backwards compatibility, # see PYTHON-709. doc = _db._apply_incoming_manipulators(doc, self) if not (isinstance(doc, RawBSONDocument) or '_id' in doc): doc['_id'] = ObjectId() doc = _db._apply_incoming_copying_manipulators(doc, self) ids.append(doc['_id']) yield doc else: def gen(): """Generator that only tracks existing _ids.""" for doc in docs: # Don't inflate RawBSONDocument by touching fields. if not isinstance(doc, RawBSONDocument): ids.append(doc.get('_id')) yield doc write_concern = write_concern or self._write_concern_for(session) blk = _Bulk(self, ordered, bypass_doc_val) blk.ops = [(message._INSERT, doc) for doc in gen()] try: blk.execute(write_concern, session=session) except BulkWriteError as bwe: _raise_last_error(bwe.details) return ids
[ "def", "_insert", "(", "self", ",", "docs", ",", "ordered", "=", "True", ",", "check_keys", "=", "True", ",", "manipulate", "=", "False", ",", "write_concern", "=", "None", ",", "op_id", "=", "None", ",", "bypass_doc_val", "=", "False", ",", "session", "=", "None", ")", ":", "if", "isinstance", "(", "docs", ",", "abc", ".", "Mapping", ")", ":", "return", "self", ".", "_insert_one", "(", "docs", ",", "ordered", ",", "check_keys", ",", "manipulate", ",", "write_concern", ",", "op_id", ",", "bypass_doc_val", ",", "session", ")", "ids", "=", "[", "]", "if", "manipulate", ":", "def", "gen", "(", ")", ":", "\"\"\"Generator that applies SON manipulators to each document\n and adds _id if necessary.\n \"\"\"", "_db", "=", "self", ".", "__database", "for", "doc", "in", "docs", ":", "# Apply user-configured SON manipulators. This order of", "# operations is required for backwards compatibility,", "# see PYTHON-709.", "doc", "=", "_db", ".", "_apply_incoming_manipulators", "(", "doc", ",", "self", ")", "if", "not", "(", "isinstance", "(", "doc", ",", "RawBSONDocument", ")", "or", "'_id'", "in", "doc", ")", ":", "doc", "[", "'_id'", "]", "=", "ObjectId", "(", ")", "doc", "=", "_db", ".", "_apply_incoming_copying_manipulators", "(", "doc", ",", "self", ")", "ids", ".", "append", "(", "doc", "[", "'_id'", "]", ")", "yield", "doc", "else", ":", "def", "gen", "(", ")", ":", "\"\"\"Generator that only tracks existing _ids.\"\"\"", "for", "doc", "in", "docs", ":", "# Don't inflate RawBSONDocument by touching fields.", "if", "not", "isinstance", "(", "doc", ",", "RawBSONDocument", ")", ":", "ids", ".", "append", "(", "doc", ".", "get", "(", "'_id'", ")", ")", "yield", "doc", "write_concern", "=", "write_concern", "or", "self", ".", "_write_concern_for", "(", "session", ")", "blk", "=", "_Bulk", "(", "self", ",", "ordered", ",", "bypass_doc_val", ")", "blk", ".", "ops", "=", "[", "(", "message", ".", "_INSERT", ",", "doc", ")", "for", "doc", "in", "gen", "(", ")", "]", "try", ":", "blk", ".", "execute", "(", "write_concern", ",", "session", "=", "session", ")", "except", "BulkWriteError", "as", "bwe", ":", "_raise_last_error", "(", "bwe", ".", "details", ")", "return", "ids" ]
Internal insert helper.
[ "Internal", "insert", "helper", "." ]
python
train
saltstack/salt
salt/modules/aptpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aptpkg.py#L125-L141
def _get_ppa_info_from_launchpad(owner_name, ppa_name): ''' Idea from softwareproperties.ppa. Uses urllib2 which sacrifices server cert verification. This is used as fall-back code or for secure PPAs :param owner_name: :param ppa_name: :return: ''' lp_url = 'https://launchpad.net/api/1.0/~{0}/+archive/{1}'.format( owner_name, ppa_name) request = _Request(lp_url, headers={'Accept': 'application/json'}) lp_page = _urlopen(request) return salt.utils.json.load(lp_page)
[ "def", "_get_ppa_info_from_launchpad", "(", "owner_name", ",", "ppa_name", ")", ":", "lp_url", "=", "'https://launchpad.net/api/1.0/~{0}/+archive/{1}'", ".", "format", "(", "owner_name", ",", "ppa_name", ")", "request", "=", "_Request", "(", "lp_url", ",", "headers", "=", "{", "'Accept'", ":", "'application/json'", "}", ")", "lp_page", "=", "_urlopen", "(", "request", ")", "return", "salt", ".", "utils", ".", "json", ".", "load", "(", "lp_page", ")" ]
Idea from softwareproperties.ppa. Uses urllib2 which sacrifices server cert verification. This is used as fall-back code or for secure PPAs :param owner_name: :param ppa_name: :return:
[ "Idea", "from", "softwareproperties", ".", "ppa", ".", "Uses", "urllib2", "which", "sacrifices", "server", "cert", "verification", "." ]
python
train
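A Python 3 standard-library sketch of the same Launchpad request; the owner and PPA names are placeholders and the network call is left commented out.

import json
import urllib.request

def ppa_info(owner_name, ppa_name):
    url = 'https://launchpad.net/api/1.0/~{0}/+archive/{1}'.format(owner_name, ppa_name)
    request = urllib.request.Request(url, headers={'Accept': 'application/json'})
    with urllib.request.urlopen(request) as response:
        return json.load(response)

# info = ppa_info('some-owner', 'some-ppa')   # requires network access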
diging/tethne
tethne/analyze/corpus.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/corpus.py#L30-L88
def _forward(X, s=1.1, gamma=1., k=5): """ Forward dynamic algorithm for burstness automaton HMM, from `Kleinberg (2002) <http://www.cs.cornell.edu/home/kleinber/bhs.pdf>`_. Parameters ---------- X : list A series of time-gaps between events. s : float (default: 1.1) Scaling parameter ( > 1.)that controls graininess of burst detection. Lower values make the model more sensitive. gamma : float (default: 1.0) Parameter that controls the 'cost' of higher burst states. Higher values make it more 'difficult' to achieve a higher burst state. k : int (default: 5) Number of states. Higher values increase computational cost of the algorithm. A maximum of 25 is suggested by the literature. Returns ------- states : list Optimal state sequence. """ X = list(X) def alpha(i): return (n/T)*(s**i) def tau(i, j): if j > i: return (j-i)*gamma*log(n) return 0. def f(j, x): return alpha(j) * exp(-1. * alpha(j) * x) def C(j, t): if j == 0 and t == 0: return 0. elif t == 0: return float("inf") C_tau = min([C_values[l][t-1] + tau(l, j) for l in xrange(k)]) return (-1. * log(f(j,X[t]))) + C_tau T = sum(X) n = len(X) # C() requires default (0) values, so we construct the "array" in advance. C_values = [[0 for t in xrange(len(X))] for j in xrange(k)] for j in xrange(k): for t in xrange(len(X)): C_values[j][t] = C(j,t) # Find the optimal state sequence. states = [argmin([c[t] for c in C_values]) for t in xrange(n)] return states
[ "def", "_forward", "(", "X", ",", "s", "=", "1.1", ",", "gamma", "=", "1.", ",", "k", "=", "5", ")", ":", "X", "=", "list", "(", "X", ")", "def", "alpha", "(", "i", ")", ":", "return", "(", "n", "/", "T", ")", "*", "(", "s", "**", "i", ")", "def", "tau", "(", "i", ",", "j", ")", ":", "if", "j", ">", "i", ":", "return", "(", "j", "-", "i", ")", "*", "gamma", "*", "log", "(", "n", ")", "return", "0.", "def", "f", "(", "j", ",", "x", ")", ":", "return", "alpha", "(", "j", ")", "*", "exp", "(", "-", "1.", "*", "alpha", "(", "j", ")", "*", "x", ")", "def", "C", "(", "j", ",", "t", ")", ":", "if", "j", "==", "0", "and", "t", "==", "0", ":", "return", "0.", "elif", "t", "==", "0", ":", "return", "float", "(", "\"inf\"", ")", "C_tau", "=", "min", "(", "[", "C_values", "[", "l", "]", "[", "t", "-", "1", "]", "+", "tau", "(", "l", ",", "j", ")", "for", "l", "in", "xrange", "(", "k", ")", "]", ")", "return", "(", "-", "1.", "*", "log", "(", "f", "(", "j", ",", "X", "[", "t", "]", ")", ")", ")", "+", "C_tau", "T", "=", "sum", "(", "X", ")", "n", "=", "len", "(", "X", ")", "# C() requires default (0) values, so we construct the \"array\" in advance.", "C_values", "=", "[", "[", "0", "for", "t", "in", "xrange", "(", "len", "(", "X", ")", ")", "]", "for", "j", "in", "xrange", "(", "k", ")", "]", "for", "j", "in", "xrange", "(", "k", ")", ":", "for", "t", "in", "xrange", "(", "len", "(", "X", ")", ")", ":", "C_values", "[", "j", "]", "[", "t", "]", "=", "C", "(", "j", ",", "t", ")", "# Find the optimal state sequence.", "states", "=", "[", "argmin", "(", "[", "c", "[", "t", "]", "for", "c", "in", "C_values", "]", ")", "for", "t", "in", "xrange", "(", "n", ")", "]", "return", "states" ]
Forward dynamic algorithm for burstness automaton HMM, from `Kleinberg (2002) <http://www.cs.cornell.edu/home/kleinber/bhs.pdf>`_. Parameters ---------- X : list A series of time-gaps between events. s : float (default: 1.1) Scaling parameter ( > 1.)that controls graininess of burst detection. Lower values make the model more sensitive. gamma : float (default: 1.0) Parameter that controls the 'cost' of higher burst states. Higher values make it more 'difficult' to achieve a higher burst state. k : int (default: 5) Number of states. Higher values increase computational cost of the algorithm. A maximum of 25 is suggested by the literature. Returns ------- states : list Optimal state sequence.
[ "Forward", "dynamic", "algorithm", "for", "burstness", "automaton", "HMM", "from", "Kleinberg", "(", "2002", ")", "<http", ":", "//", "www", ".", "cs", ".", "cornell", ".", "edu", "/", "home", "/", "kleinber", "/", "bhs", ".", "pdf", ">", "_", "." ]
python
train
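The detector expects a series of gaps between consecutive events; a tiny sketch of preparing that input from invented event times:

event_times = [0.0, 1.0, 2.0, 3.0, 3.1, 3.2, 3.3, 7.0, 12.0]         # invented
gaps = [t2 - t1 for t1, t2 in zip(event_times, event_times[1:])]
print(gaps)   # the run of short gaps around t=3 is what would map to a higher burst state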
Clinical-Genomics/scout
scout/update/panel.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/update/panel.py#L9-L45
def update_panel(adapter, panel_name, panel_version, new_version=None, new_date=None): """Update a gene panel in the database We need to update the actual gene panel and then all cases that refers to the panel. Args: adapter(scout.adapter.MongoAdapter) panel_name(str): Unique name for a gene panel panel_version(float) new_version(float) new_date(datetime.datetime) Returns: updated_panel(scout.models.GenePanel): The updated gene panel object """ panel_obj = adapter.gene_panel(panel_name, panel_version) if not panel_obj: raise IntegrityError("Panel %s version %s does not exist" % (panel_name, panel_version)) updated_panel = adapter.update_panel(panel_obj, new_version, new_date) panel_id = updated_panel['_id'] # We need to alter the embedded panels in all affected cases update = {'$set': {}} if new_version: update['$set']['panels.$.version'] = updated_panel['version'] if new_date: update['$set']['panels.$.updated_at'] = updated_panel['date'] LOG.info('Updating affected cases with {0}'.format(update)) query = {'panels': { '$elemMatch': {'panel_name': panel_name}}} adapter.case_collection.update_many(query, update) return updated_panel
[ "def", "update_panel", "(", "adapter", ",", "panel_name", ",", "panel_version", ",", "new_version", "=", "None", ",", "new_date", "=", "None", ")", ":", "panel_obj", "=", "adapter", ".", "gene_panel", "(", "panel_name", ",", "panel_version", ")", "if", "not", "panel_obj", ":", "raise", "IntegrityError", "(", "\"Panel %s version %s does not exist\"", "%", "(", "panel_name", ",", "panel_version", ")", ")", "updated_panel", "=", "adapter", ".", "update_panel", "(", "panel_obj", ",", "new_version", ",", "new_date", ")", "panel_id", "=", "updated_panel", "[", "'_id'", "]", "# We need to alter the embedded panels in all affected cases", "update", "=", "{", "'$set'", ":", "{", "}", "}", "if", "new_version", ":", "update", "[", "'$set'", "]", "[", "'panels.$.version'", "]", "=", "updated_panel", "[", "'version'", "]", "if", "new_date", ":", "update", "[", "'$set'", "]", "[", "'panels.$.updated_at'", "]", "=", "updated_panel", "[", "'date'", "]", "LOG", ".", "info", "(", "'Updating affected cases with {0}'", ".", "format", "(", "update", ")", ")", "query", "=", "{", "'panels'", ":", "{", "'$elemMatch'", ":", "{", "'panel_name'", ":", "panel_name", "}", "}", "}", "adapter", ".", "case_collection", ".", "update_many", "(", "query", ",", "update", ")", "return", "updated_panel" ]
Update a gene panel in the database We need to update the actual gene panel and then all cases that refers to the panel. Args: adapter(scout.adapter.MongoAdapter) panel_name(str): Unique name for a gene panel panel_version(float) new_version(float) new_date(datetime.datetime) Returns: updated_panel(scout.models.GenePanel): The updated gene panel object
[ "Update", "a", "gene", "panel", "in", "the", "database", "We", "need", "to", "update", "the", "actual", "gene", "panel", "and", "then", "all", "cases", "that", "refers", "to", "the", "panel", ".", "Args", ":", "adapter", "(", "scout", ".", "adapter", ".", "MongoAdapter", ")", "panel_name", "(", "str", ")", ":", "Unique", "name", "for", "a", "gene", "panel", "panel_version", "(", "float", ")", "new_version", "(", "float", ")", "new_date", "(", "datetime", ".", "datetime", ")", "Returns", ":", "updated_panel", "(", "scout", ".", "models", ".", "GenePanel", ")", ":", "The", "updated", "gene", "panel", "object" ]
python
test
numenta/nupic
src/nupic/algorithms/backtracking_tm.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/backtracking_tm.py#L2442-L2545
def _updateLearningState(self, activeColumns): """ Update the learning state. Called from compute() on every iteration :param activeColumns List of active column indices """ # Copy predicted and active states into t-1 self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :] self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :] # Update our learning input history if self.maxLrnBacktrack > 0: if len(self._prevLrnPatterns) > self.maxLrnBacktrack: self._prevLrnPatterns.pop(0) self._prevLrnPatterns.append(activeColumns) if self.verbosity >= 4: print "Previous learn patterns: \n" print self._prevLrnPatterns # Process queued up segment updates, now that we have bottom-up, we # can update the permanences on the cells that we predicted to turn on # and did receive bottom-up self._processSegmentUpdates(activeColumns) # Decrement the PAM counter if it is running and increment our learned # sequence length if self.pamCounter > 0: self.pamCounter -= 1 self.learnedSeqLength += 1 # Phase 1 - turn on the predicted cell in each column that received # bottom-up. If there was no predicted cell, pick one to learn to. if not self.resetCalled: # Uses lrnActiveState['t-1'] and lrnPredictedState['t-1'] # computes lrnActiveState['t'] inSequence = self._learnPhase1(activeColumns) # Reset our PAM counter if we are in sequence if inSequence: self.pamCounter = self.pamLength # Print status of PAM counter, learned sequence length if self.verbosity >= 3: print "pamCounter = ", self.pamCounter, "seqLength = ", \ self.learnedSeqLength # Start over on start cells if any of the following occur: # 1.) A reset was just called # 2.) We have been loo long out of sequence (the pamCounter has expired) # 3.) We have reached maximum allowed sequence length. # # Note that, unless we are following a reset, we also just learned or # re-enforced connections to the current set of active columns because # this input is still a valid prediction to learn. # # It is especially helpful to learn the connections to this input when # you have a maxSeqLength constraint in place. Otherwise, you will have # no continuity at all between sub-sequences of length maxSeqLength. if (self.resetCalled or self.pamCounter == 0 or (self.maxSeqLength != 0 and self.learnedSeqLength >= self.maxSeqLength)): if self.verbosity >= 3: if self.resetCalled: print "Starting over:", activeColumns, "(reset was called)" elif self.pamCounter == 0: print "Starting over:", activeColumns, "(PAM counter expired)" else: print "Starting over:", activeColumns, "(reached maxSeqLength)" # Update average learned sequence length - this is a diagnostic statistic if self.pamCounter == 0: seqLength = self.learnedSeqLength - self.pamLength else: seqLength = self.learnedSeqLength if self.verbosity >= 3: print " learned sequence length was:", seqLength self._updateAvgLearnedSeqLength(seqLength) # Backtrack to an earlier starting point, if we find one backSteps = 0 if not self.resetCalled: backSteps = self._learnBacktrack() # Start over in the current time step if reset was called, or we couldn't # backtrack. if self.resetCalled or backSteps is None or backSteps == 0: backSteps = 0 self.lrnActiveState['t'].fill(0) for c in activeColumns: self.lrnActiveState['t'][c, 0] = 1 # Remove any old input history patterns self._prevLrnPatterns = [] # Reset PAM counter self.pamCounter = self.pamLength self.learnedSeqLength = backSteps # Clear out any old segment updates from prior sequences self.segmentUpdates = {} # Phase 2 - Compute new predicted state. 
When computing predictions for # phase 2, we predict at most one cell per column (the one with the best # matching segment). self._learnPhase2()
[ "def", "_updateLearningState", "(", "self", ",", "activeColumns", ")", ":", "# Copy predicted and active states into t-1", "self", ".", "lrnPredictedState", "[", "'t-1'", "]", "[", ":", ",", ":", "]", "=", "self", ".", "lrnPredictedState", "[", "'t'", "]", "[", ":", ",", ":", "]", "self", ".", "lrnActiveState", "[", "'t-1'", "]", "[", ":", ",", ":", "]", "=", "self", ".", "lrnActiveState", "[", "'t'", "]", "[", ":", ",", ":", "]", "# Update our learning input history", "if", "self", ".", "maxLrnBacktrack", ">", "0", ":", "if", "len", "(", "self", ".", "_prevLrnPatterns", ")", ">", "self", ".", "maxLrnBacktrack", ":", "self", ".", "_prevLrnPatterns", ".", "pop", "(", "0", ")", "self", ".", "_prevLrnPatterns", ".", "append", "(", "activeColumns", ")", "if", "self", ".", "verbosity", ">=", "4", ":", "print", "\"Previous learn patterns: \\n\"", "print", "self", ".", "_prevLrnPatterns", "# Process queued up segment updates, now that we have bottom-up, we", "# can update the permanences on the cells that we predicted to turn on", "# and did receive bottom-up", "self", ".", "_processSegmentUpdates", "(", "activeColumns", ")", "# Decrement the PAM counter if it is running and increment our learned", "# sequence length", "if", "self", ".", "pamCounter", ">", "0", ":", "self", ".", "pamCounter", "-=", "1", "self", ".", "learnedSeqLength", "+=", "1", "# Phase 1 - turn on the predicted cell in each column that received", "# bottom-up. If there was no predicted cell, pick one to learn to.", "if", "not", "self", ".", "resetCalled", ":", "# Uses lrnActiveState['t-1'] and lrnPredictedState['t-1']", "# computes lrnActiveState['t']", "inSequence", "=", "self", ".", "_learnPhase1", "(", "activeColumns", ")", "# Reset our PAM counter if we are in sequence", "if", "inSequence", ":", "self", ".", "pamCounter", "=", "self", ".", "pamLength", "# Print status of PAM counter, learned sequence length", "if", "self", ".", "verbosity", ">=", "3", ":", "print", "\"pamCounter = \"", ",", "self", ".", "pamCounter", ",", "\"seqLength = \"", ",", "self", ".", "learnedSeqLength", "# Start over on start cells if any of the following occur:", "# 1.) A reset was just called", "# 2.) We have been loo long out of sequence (the pamCounter has expired)", "# 3.) We have reached maximum allowed sequence length.", "#", "# Note that, unless we are following a reset, we also just learned or", "# re-enforced connections to the current set of active columns because", "# this input is still a valid prediction to learn.", "#", "# It is especially helpful to learn the connections to this input when", "# you have a maxSeqLength constraint in place. 
Otherwise, you will have", "# no continuity at all between sub-sequences of length maxSeqLength.", "if", "(", "self", ".", "resetCalled", "or", "self", ".", "pamCounter", "==", "0", "or", "(", "self", ".", "maxSeqLength", "!=", "0", "and", "self", ".", "learnedSeqLength", ">=", "self", ".", "maxSeqLength", ")", ")", ":", "if", "self", ".", "verbosity", ">=", "3", ":", "if", "self", ".", "resetCalled", ":", "print", "\"Starting over:\"", ",", "activeColumns", ",", "\"(reset was called)\"", "elif", "self", ".", "pamCounter", "==", "0", ":", "print", "\"Starting over:\"", ",", "activeColumns", ",", "\"(PAM counter expired)\"", "else", ":", "print", "\"Starting over:\"", ",", "activeColumns", ",", "\"(reached maxSeqLength)\"", "# Update average learned sequence length - this is a diagnostic statistic", "if", "self", ".", "pamCounter", "==", "0", ":", "seqLength", "=", "self", ".", "learnedSeqLength", "-", "self", ".", "pamLength", "else", ":", "seqLength", "=", "self", ".", "learnedSeqLength", "if", "self", ".", "verbosity", ">=", "3", ":", "print", "\" learned sequence length was:\"", ",", "seqLength", "self", ".", "_updateAvgLearnedSeqLength", "(", "seqLength", ")", "# Backtrack to an earlier starting point, if we find one", "backSteps", "=", "0", "if", "not", "self", ".", "resetCalled", ":", "backSteps", "=", "self", ".", "_learnBacktrack", "(", ")", "# Start over in the current time step if reset was called, or we couldn't", "# backtrack.", "if", "self", ".", "resetCalled", "or", "backSteps", "is", "None", "or", "backSteps", "==", "0", ":", "backSteps", "=", "0", "self", ".", "lrnActiveState", "[", "'t'", "]", ".", "fill", "(", "0", ")", "for", "c", "in", "activeColumns", ":", "self", ".", "lrnActiveState", "[", "'t'", "]", "[", "c", ",", "0", "]", "=", "1", "# Remove any old input history patterns", "self", ".", "_prevLrnPatterns", "=", "[", "]", "# Reset PAM counter", "self", ".", "pamCounter", "=", "self", ".", "pamLength", "self", ".", "learnedSeqLength", "=", "backSteps", "# Clear out any old segment updates from prior sequences", "self", ".", "segmentUpdates", "=", "{", "}", "# Phase 2 - Compute new predicted state. When computing predictions for", "# phase 2, we predict at most one cell per column (the one with the best", "# matching segment).", "self", ".", "_learnPhase2", "(", ")" ]
Update the learning state. Called from compute() on every iteration :param activeColumns List of active column indices
[ "Update", "the", "learning", "state", ".", "Called", "from", "compute", "()", "on", "every", "iteration", ":", "param", "activeColumns", "List", "of", "active", "column", "indices" ]
python
valid
bitesofcode/projexui
projexui/widgets/xchart/xchart.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchart.py#L258-L267
def axis(self, name): """ Looks up an axis for this chart by the given name. :return <projexui.widgets.xchart.XChartAxis> || None """ for axis in self.axes(): if axis.name() == name: return axis return None
[ "def", "axis", "(", "self", ",", "name", ")", ":", "for", "axis", "in", "self", ".", "axes", "(", ")", ":", "if", "axis", ".", "name", "(", ")", "==", "name", ":", "return", "axis", "return", "None" ]
Looks up an axis for this chart by the given name. :return <projexui.widgets.xchart.XChartAxis> || None
[ "Looks", "up", "an", "axis", "for", "this", "chart", "by", "the", "given", "name", ".", ":", "return", "<projexui", ".", "widgets", ".", "xchart", ".", "XChartAxis", ">", "||", "None" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py#L12-L32
def mac_address_table_static_mac_address(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") mac_address_table = ET.SubElement(config, "mac-address-table", xmlns="urn:brocade.com:mgmt:brocade-mac-address-table") static = ET.SubElement(mac_address_table, "static") forward_key = ET.SubElement(static, "forward") forward_key.text = kwargs.pop('forward') interface_type_key = ET.SubElement(static, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(static, "interface-name") interface_name_key.text = kwargs.pop('interface_name') vlan_key = ET.SubElement(static, "vlan") vlan_key.text = kwargs.pop('vlan') vlanid_key = ET.SubElement(static, "vlanid") vlanid_key.text = kwargs.pop('vlanid') mac_address = ET.SubElement(static, "mac-address") mac_address.text = kwargs.pop('mac_address') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "mac_address_table_static_mac_address", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "mac_address_table", "=", "ET", ".", "SubElement", "(", "config", ",", "\"mac-address-table\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-mac-address-table\"", ")", "static", "=", "ET", ".", "SubElement", "(", "mac_address_table", ",", "\"static\"", ")", "forward_key", "=", "ET", ".", "SubElement", "(", "static", ",", "\"forward\"", ")", "forward_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'forward'", ")", "interface_type_key", "=", "ET", ".", "SubElement", "(", "static", ",", "\"interface-type\"", ")", "interface_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_type'", ")", "interface_name_key", "=", "ET", ".", "SubElement", "(", "static", ",", "\"interface-name\"", ")", "interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_name'", ")", "vlan_key", "=", "ET", ".", "SubElement", "(", "static", ",", "\"vlan\"", ")", "vlan_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'vlan'", ")", "vlanid_key", "=", "ET", ".", "SubElement", "(", "static", ",", "\"vlanid\"", ")", "vlanid_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'vlanid'", ")", "mac_address", "=", "ET", ".", "SubElement", "(", "static", ",", "\"mac-address\"", ")", "mac_address", ".", "text", "=", "kwargs", ".", "pop", "(", "'mac_address'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
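A hedged call sketch for the auto-generated binding above; the `device` object, the attribute path to this binding, and every argument value are assumptions made purely for illustration:

    # All values are invented; `device` is assumed to be a connected pynos
    # object that exposes this generated method directly.
    device.mac_address_table_static_mac_address(
        mac_address='0011.2233.4455',
        forward='forward',
        interface_type='tengigabitethernet',
        interface_name='1/0/1',
        vlan='vlan',
        vlanid='10')

Each keyword argument becomes the text of the matching XML sub-element before the assembled `config` tree is handed to the callback.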
bodylabs/harrison
harrison/registered_timer.py
https://github.com/bodylabs/harrison/blob/8a05b5c997909a75480b3fccacb2bfff888abfc7/harrison/registered_timer.py#L7-L33
def aggregate_registry_timers():
    """Returns a list of aggregate timing information for registered timers.

    Each element is a 3-tuple of

    - timer description
    - aggregate elapsed time
    - number of calls

    The list is sorted by the first start time of each aggregate timer.
    """
    import itertools

    timers = sorted(shared_registry.values(), key=lambda t: t.desc)

    aggregate_timers = []
    for k, g in itertools.groupby(timers, key=lambda t: t.desc):
        group = list(g)
        num_calls = len(group)
        total_elapsed_ms = sum(t.elapsed_time_ms for t in group)
        first_start_time = min(t.start_time for t in group)
        # We'll use the first start time as a sort key.
        aggregate_timers.append(
            (first_start_time, (k, total_elapsed_ms, num_calls)))
    aggregate_timers.sort()
    return zip(*aggregate_timers)[1]
[ "def", "aggregate_registry_timers", "(", ")", ":", "import", "itertools", "timers", "=", "sorted", "(", "shared_registry", ".", "values", "(", ")", ",", "key", "=", "lambda", "t", ":", "t", ".", "desc", ")", "aggregate_timers", "=", "[", "]", "for", "k", ",", "g", "in", "itertools", ".", "groupby", "(", "timers", ",", "key", "=", "lambda", "t", ":", "t", ".", "desc", ")", ":", "group", "=", "list", "(", "g", ")", "num_calls", "=", "len", "(", "group", ")", "total_elapsed_ms", "=", "sum", "(", "t", ".", "elapsed_time_ms", "for", "t", "in", "group", ")", "first_start_time", "=", "min", "(", "t", ".", "start_time", "for", "t", "in", "group", ")", "# We'll use the first start time as a sort key.", "aggregate_timers", ".", "append", "(", "(", "first_start_time", ",", "(", "k", ",", "total_elapsed_ms", ",", "num_calls", ")", ")", ")", "aggregate_timers", ".", "sort", "(", ")", "return", "zip", "(", "*", "aggregate_timers", ")", "[", "1", "]" ]
Returns a list of aggregate timing information for registered timers. Each element is a 3-tuple of - timer description - aggregate elapsed time - number of calls The list is sorted by the first start time of each aggregate timer.
[ "Returns", "a", "list", "of", "aggregate", "timing", "information", "for", "registered", "timers", "." ]
python
train
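A short consumption sketch for the aggregate report above; it assumes some registered timers have already populated `shared_registry`:

    # Each entry is (description, total elapsed ms, number of calls).
    for desc, total_elapsed_ms, num_calls in aggregate_registry_timers():
        print('%-30s %8.1f ms over %d call(s)' % (desc, total_elapsed_ms, num_calls))

Note that the `zip(*aggregate_timers)[1]` return relies on Python 2 semantics, where `zip` yields a subscriptable list; under Python 3 it would need `list(zip(*aggregate_timers))[1]`.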
kivy/python-for-android
pythonforandroid/recommendations.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/recommendations.py#L97-L107
def check_ndk_api(ndk_api, android_api):
    """Warn if the user's NDK is too high or low."""
    if ndk_api > android_api:
        raise BuildInterruptingException(
            'Target NDK API is {}, higher than the target Android API {}.'.format(
                ndk_api, android_api),
            instructions=('The NDK API is a minimum supported API number and must be lower '
                          'than the target Android API'))

    if ndk_api < MIN_NDK_API:
        warning(OLD_NDK_API_MESSAGE)
[ "def", "check_ndk_api", "(", "ndk_api", ",", "android_api", ")", ":", "if", "ndk_api", ">", "android_api", ":", "raise", "BuildInterruptingException", "(", "'Target NDK API is {}, higher than the target Android API {}.'", ".", "format", "(", "ndk_api", ",", "android_api", ")", ",", "instructions", "=", "(", "'The NDK API is a minimum supported API number and must be lower '", "'than the target Android API'", ")", ")", "if", "ndk_api", "<", "MIN_NDK_API", ":", "warning", "(", "OLD_NDK_API_MESSAGE", ")" ]
Warn if the user's NDK is too high or low.
[ "Warn", "if", "the", "user", "s", "NDK", "is", "too", "high", "or", "low", "." ]
python
train
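A small sketch of the failure modes of the check above; the API levels are made-up examples, and `MIN_NDK_API` comes from the same recommendations module:

    check_ndk_api(ndk_api=21, android_api=27)   # passes: NDK API is below the target API
    check_ndk_api(ndk_api=28, android_api=27)   # raises BuildInterruptingException
    check_ndk_api(ndk_api=19, android_api=27)   # warns if 19 is below MIN_NDK_API

Despite the one-line docstring, only the too-low case is a warning; a too-high NDK API aborts the build with an exception.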
insightindustry/validator-collection
validator_collection/validators.py
https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/validators.py#L1705-L1738
def path_exists(value, allow_empty = False, **kwargs):
    """Validate that ``value`` is a path-like object that exists on the local
    filesystem.

    :param value: The value to validate.

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: The file name represented by ``value``.
    :rtype: Path-like object / :obj:`None <python:None>`

    :raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value``
      is empty
    :raises NotPathlikeError: if ``value`` is not a path-like object
    :raises PathExistsError: if ``value`` does not exist
    """
    if not value and not allow_empty:
        raise errors.EmptyValueError('value (%s) was empty' % value)
    elif not value:
        return None

    value = path(value, force_run = True)  # pylint: disable=E1123

    if not os.path.exists(value):
        raise errors.PathExistsError('value (%s) not found' % value)

    return value
[ "def", "path_exists", "(", "value", ",", "allow_empty", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "value", "and", "not", "allow_empty", ":", "raise", "errors", ".", "EmptyValueError", "(", "'value (%s) was empty'", "%", "value", ")", "elif", "not", "value", ":", "return", "None", "value", "=", "path", "(", "value", ",", "force_run", "=", "True", ")", "# pylint: disable=E1123", "if", "not", "os", ".", "path", ".", "exists", "(", "value", ")", ":", "raise", "errors", ".", "PathExistsError", "(", "'value (%s) not found'", "%", "value", ")", "return", "value" ]
Validate that ``value`` is a path-like object that exists on the local filesystem. :param value: The value to validate. :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: The file name represented by ``value``. :rtype: Path-like object / :obj:`None <python:None>` :raises EmptyValueError: if ``allow_empty`` is ``False`` and ``value`` is empty :raises NotPathlikeError: if ``value`` is not a path-like object :raises PathExistsError: if ``value`` does not exist
[ "Validate", "that", "value", "is", "a", "path", "-", "like", "object", "that", "exists", "on", "the", "local", "filesystem", "." ]
python
train
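An illustrative call pattern for the validator above; the file paths are invented, and the import path assumes the package's public `validators` and `errors` modules:

    from validator_collection import validators, errors

    print(validators.path_exists('/etc/hosts'))             # returns the path when it exists
    print(validators.path_exists(None, allow_empty=True))   # returns None instead of raising
    try:
        validators.path_exists('/no/such/file')
    except errors.PathExistsError:
        print('path does not exist')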
fastai/fastai
fastai/text/data.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/text/data.py#L209-L223
def from_csv(cls, path:PathOrStr, csv_name, valid_pct:float=0.2, test:Optional[str]=None,
             tokenizer:Tokenizer=None, vocab:Vocab=None, classes:Collection[str]=None, delimiter:str=None,
             header='infer', text_cols:IntsOrStrs=1, label_cols:IntsOrStrs=0, label_delim:str=None,
             chunksize:int=10000, max_vocab:int=60000, min_freq:int=2, mark_fields:bool=False,
             include_bos:bool=True, include_eos:bool=False, **kwargs) -> DataBunch:
    "Create a `TextDataBunch` from texts in csv files. `kwargs` are passed to the dataloader creation."
    df = pd.read_csv(Path(path)/csv_name, header=header, delimiter=delimiter)
    df = df.iloc[np.random.permutation(len(df))]
    cut = int(valid_pct * len(df)) + 1
    train_df, valid_df = df[cut:], df[:cut]
    test_df = None if test is None else pd.read_csv(Path(path)/test, header=header, delimiter=delimiter)
    return cls.from_df(path, train_df, valid_df, test_df, tokenizer=tokenizer, vocab=vocab, classes=classes,
                       text_cols=text_cols, label_cols=label_cols, label_delim=label_delim, chunksize=chunksize,
                       max_vocab=max_vocab, min_freq=min_freq, mark_fields=mark_fields,
                       include_bos=include_bos, include_eos=include_eos, **kwargs)
[ "def", "from_csv", "(", "cls", ",", "path", ":", "PathOrStr", ",", "csv_name", ",", "valid_pct", ":", "float", "=", "0.2", ",", "test", ":", "Optional", "[", "str", "]", "=", "None", ",", "tokenizer", ":", "Tokenizer", "=", "None", ",", "vocab", ":", "Vocab", "=", "None", ",", "classes", ":", "Collection", "[", "str", "]", "=", "None", ",", "delimiter", ":", "str", "=", "None", ",", "header", "=", "'infer'", ",", "text_cols", ":", "IntsOrStrs", "=", "1", ",", "label_cols", ":", "IntsOrStrs", "=", "0", ",", "label_delim", ":", "str", "=", "None", ",", "chunksize", ":", "int", "=", "10000", ",", "max_vocab", ":", "int", "=", "60000", ",", "min_freq", ":", "int", "=", "2", ",", "mark_fields", ":", "bool", "=", "False", ",", "include_bos", ":", "bool", "=", "True", ",", "include_eos", ":", "bool", "=", "False", ",", "*", "*", "kwargs", ")", "->", "DataBunch", ":", "df", "=", "pd", ".", "read_csv", "(", "Path", "(", "path", ")", "/", "csv_name", ",", "header", "=", "header", ",", "delimiter", "=", "delimiter", ")", "df", "=", "df", ".", "iloc", "[", "np", ".", "random", ".", "permutation", "(", "len", "(", "df", ")", ")", "]", "cut", "=", "int", "(", "valid_pct", "*", "len", "(", "df", ")", ")", "+", "1", "train_df", ",", "valid_df", "=", "df", "[", "cut", ":", "]", ",", "df", "[", ":", "cut", "]", "test_df", "=", "None", "if", "test", "is", "None", "else", "pd", ".", "read_csv", "(", "Path", "(", "path", ")", "/", "test", ",", "header", "=", "header", ",", "delimiter", "=", "delimiter", ")", "return", "cls", ".", "from_df", "(", "path", ",", "train_df", ",", "valid_df", ",", "test_df", ",", "tokenizer", "=", "tokenizer", ",", "vocab", "=", "vocab", ",", "classes", "=", "classes", ",", "text_cols", "=", "text_cols", ",", "label_cols", "=", "label_cols", ",", "label_delim", "=", "label_delim", ",", "chunksize", "=", "chunksize", ",", "max_vocab", "=", "max_vocab", ",", "min_freq", "=", "min_freq", ",", "mark_fields", "=", "mark_fields", ",", "include_bos", "=", "include_bos", ",", "include_eos", "=", "include_eos", ",", "*", "*", "kwargs", ")" ]
Create a `TextDataBunch` from texts in csv files. `kwargs` are passed to the dataloader creation.
[ "Create", "a", "TextDataBunch", "from", "texts", "in", "csv", "files", ".", "kwargs", "are", "passed", "to", "the", "dataloader", "creation", "." ]
python
train
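A hedged usage sketch for the factory method above, shown on `TextDataBunch`; the directory and file name are invented, and the column arguments simply restate the defaults (label in column 0, text in column 1):

    data = TextDataBunch.from_csv('data/', 'texts.csv', valid_pct=0.2,
                                  text_cols=1, label_cols=0)

In practice the same classmethod is usually called on a task-specific subclass such as `TextClasDataBunch` or `TextLMDataBunch`, which determines how batches are assembled.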
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/collection.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L94-L111
def document(self, document_id=None):
    """Create a sub-document underneath the current collection.

    Args:
        document_id (Optional[str]): The document identifier within the
            current collection. If not provided, will default to a random
            20 character string composed of digits, uppercase and
            lowercase and letters.

    Returns:
        ~.firestore_v1beta1.document.DocumentReference: The child
        document.
    """
    if document_id is None:
        document_id = _auto_id()

    child_path = self._path + (document_id,)
    return self._client.document(*child_path)
[ "def", "document", "(", "self", ",", "document_id", "=", "None", ")", ":", "if", "document_id", "is", "None", ":", "document_id", "=", "_auto_id", "(", ")", "child_path", "=", "self", ".", "_path", "+", "(", "document_id", ",", ")", "return", "self", ".", "_client", ".", "document", "(", "*", "child_path", ")" ]
Create a sub-document underneath the current collection. Args: document_id (Optional[str]): The document identifier within the current collection. If not provided, will default to a random 20 character string composed of digits, uppercase and lowercase and letters. Returns: ~.firestore_v1beta1.document.DocumentReference: The child document.
[ "Create", "a", "sub", "-", "document", "underneath", "the", "current", "collection", "." ]
python
train
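A brief client-side sketch of the two id modes described above; the collection and document names are invented, and credentials are assumed to be configured in the environment:

    from google.cloud import firestore

    client = firestore.Client()
    users = client.collection('users')
    named_doc = users.document('alovelace')   # explicit document id
    auto_doc = users.document()               # auto-generated 20-character id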
pandas-dev/pandas
pandas/core/groupby/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L1499-L1564
def nunique(self, dropna=True):
    """
    Return DataFrame with number of distinct observations per group for
    each column.

    .. versionadded:: 0.20.0

    Parameters
    ----------
    dropna : boolean, default True
        Don't include NaN in the counts.

    Returns
    -------
    nunique: DataFrame

    Examples
    --------
    >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
    ...                           'ham', 'ham'],
    ...                    'value1': [1, 5, 5, 2, 5, 5],
    ...                    'value2': list('abbaxy')})
    >>> df
         id  value1 value2
    0  spam       1      a
    1   egg       5      b
    2   egg       5      b
    3  spam       2      a
    4   ham       5      x
    5   ham       5      y

    >>> df.groupby('id').nunique()
          id  value1  value2
    id
    egg    1       1       1
    ham    1       1       2
    spam   1       2       1

    Check for rows with the same id but conflicting values:

    >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
         id  value1 value2
    0  spam       1      a
    3  spam       2      a
    4   ham       5      x
    5   ham       5      y
    """
    obj = self._selected_obj

    def groupby_series(obj, col=None):
        return SeriesGroupBy(obj, selection=col,
                             grouper=self.grouper).nunique(dropna=dropna)

    if isinstance(obj, Series):
        results = groupby_series(obj)
    else:
        from pandas.core.reshape.concat import concat
        results = [groupby_series(obj[col], col) for col in obj.columns]
        results = concat(results, axis=1)
        results.columns.names = obj.columns.names

    if not self.as_index:
        results.index = ibase.default_index(len(results))

    return results
[ "def", "nunique", "(", "self", ",", "dropna", "=", "True", ")", ":", "obj", "=", "self", ".", "_selected_obj", "def", "groupby_series", "(", "obj", ",", "col", "=", "None", ")", ":", "return", "SeriesGroupBy", "(", "obj", ",", "selection", "=", "col", ",", "grouper", "=", "self", ".", "grouper", ")", ".", "nunique", "(", "dropna", "=", "dropna", ")", "if", "isinstance", "(", "obj", ",", "Series", ")", ":", "results", "=", "groupby_series", "(", "obj", ")", "else", ":", "from", "pandas", ".", "core", ".", "reshape", ".", "concat", "import", "concat", "results", "=", "[", "groupby_series", "(", "obj", "[", "col", "]", ",", "col", ")", "for", "col", "in", "obj", ".", "columns", "]", "results", "=", "concat", "(", "results", ",", "axis", "=", "1", ")", "results", ".", "columns", ".", "names", "=", "obj", ".", "columns", ".", "names", "if", "not", "self", ".", "as_index", ":", "results", ".", "index", "=", "ibase", ".", "default_index", "(", "len", "(", "results", ")", ")", "return", "results" ]
Return DataFrame with number of distinct observations per group for each column. .. versionadded:: 0.20.0 Parameters ---------- dropna : boolean, default True Don't include NaN in the counts. Returns ------- nunique: DataFrame Examples -------- >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', ... 'ham', 'ham'], ... 'value1': [1, 5, 5, 2, 5, 5], ... 'value2': list('abbaxy')}) >>> df id value1 value2 0 spam 1 a 1 egg 5 b 2 egg 5 b 3 spam 2 a 4 ham 5 x 5 ham 5 y >>> df.groupby('id').nunique() id value1 value2 id egg 1 1 1 ham 1 1 2 spam 1 2 1 Check for rows with the same id but conflicting values: >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any()) id value1 value2 0 spam 1 a 3 spam 2 a 4 ham 5 x 5 ham 5 y
[ "Return", "DataFrame", "with", "number", "of", "distinct", "observations", "per", "group", "for", "each", "column", "." ]
python
train
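The snippet below simply re-runs the docstring's own demonstration so the grouped counts can be checked interactively; it only assumes a pandas installation:

    import pandas as pd

    df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', 'ham', 'ham'],
                       'value1': [1, 5, 5, 2, 5, 5],
                       'value2': list('abbaxy')})
    print(df.groupby('id').nunique())
    # Rows whose id maps to conflicting values in some column:
    print(df.groupby('id').filter(lambda g: (g.nunique() > 1).any()))

On the pandas version documented here (the 0.20-era docstring) the grouped `nunique()` output still includes the `id` column itself, as shown above.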
saltstack/salt
salt/modules/boto_iam.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iam.py#L894-L929
def update_account_password_policy(allow_users_to_change_password=None, hard_expiry=None, max_password_age=None,
                                   minimum_password_length=None, password_reuse_prevention=None,
                                   require_lowercase_characters=None, require_numbers=None, require_symbols=None,
                                   require_uppercase_characters=None, region=None, key=None, keyid=None,
                                   profile=None):
    '''
    Update the password policy for the AWS account.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.update_account_password_policy True
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        conn.update_account_password_policy(allow_users_to_change_password, hard_expiry, max_password_age,
                                            minimum_password_length, password_reuse_prevention,
                                            require_lowercase_characters, require_numbers, require_symbols,
                                            require_uppercase_characters)
        log.info('The password policy has been updated.')
        return True
    except boto.exception.BotoServerError as e:
        log.debug(e)
        msg = 'Failed to update the password policy'
        log.error(msg)
        return False
[ "def", "update_account_password_policy", "(", "allow_users_to_change_password", "=", "None", ",", "hard_expiry", "=", "None", ",", "max_password_age", "=", "None", ",", "minimum_password_length", "=", "None", ",", "password_reuse_prevention", "=", "None", ",", "require_lowercase_characters", "=", "None", ",", "require_numbers", "=", "None", ",", "require_symbols", "=", "None", ",", "require_uppercase_characters", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "try", ":", "conn", ".", "update_account_password_policy", "(", "allow_users_to_change_password", ",", "hard_expiry", ",", "max_password_age", ",", "minimum_password_length", ",", "password_reuse_prevention", ",", "require_lowercase_characters", ",", "require_numbers", ",", "require_symbols", ",", "require_uppercase_characters", ")", "log", ".", "info", "(", "'The password policy has been updated.'", ")", "return", "True", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "log", ".", "debug", "(", "e", ")", "msg", "=", "'Failed to update the password policy'", "log", ".", "error", "(", "msg", ")", "return", "False" ]
Update the password policy for the AWS account. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion boto_iam.update_account_password_policy True
[ "Update", "the", "password", "policy", "for", "the", "AWS", "account", "." ]
python
train
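A hedged invocation sketch for the execution module function above; the minion id and policy values are invented, and working AWS credentials (or a named `profile`) are assumed on the minion:

    # CLI form, mirroring the docstring's example:
    #   salt myminion boto_iam.update_account_password_policy minimum_password_length=12 require_numbers=True
    import salt.client

    local = salt.client.LocalClient()
    result = local.cmd('myminion', 'boto_iam.update_account_password_policy',
                       kwarg={'minimum_password_length': 12, 'require_numbers': True})
    print(result)   # {'myminion': True} on success, False on a Boto error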